diff --git a/.gitattributes b/.gitattributes index dd1d180962a11e8a9d28952651b0773b6680ab04..b118c4abf5e0121e99c7dceb3b31e5b79c89e538 100644 --- a/.gitattributes +++ b/.gitattributes @@ -1284,3 +1284,11 @@ data/2025/2504_06xxx/2504.06261/000d1d7e-ab84-4037-a349-69f333ac45e9_origin.pdf data/2025/2504_06xxx/2504.06263/30e417a2-2609-4ff1-95ae-cf0382220f6f_origin.pdf filter=lfs diff=lfs merge=lfs -text data/2025/2504_06xxx/2504.06397/d704b2e6-2c04-4966-b818-dc796c22634f_origin.pdf filter=lfs diff=lfs merge=lfs -text data/2025/2504_06xxx/2504.06632/6418d473-80e2-437f-be9d-f7a58bd3474e_origin.pdf filter=lfs diff=lfs merge=lfs -text +data/2025/2504_05xxx/2504.05758/607ad388-a3c2-41a7-ba56-f696cce741af_origin.pdf filter=lfs diff=lfs merge=lfs -text +data/2025/2504_05xxx/2504.05786/e603c6f4-386e-4380-abf7-2f18915b0ee6_origin.pdf filter=lfs diff=lfs merge=lfs -text +data/2025/2504_05xxx/2504.05792/2e1d882c-c0bf-4c6f-98d8-3ae7d8fb9f26_origin.pdf filter=lfs diff=lfs merge=lfs -text +data/2025/2504_05xxx/2504.05812/2ebd62c4-e647-47e7-bb58-1c94267578a3_origin.pdf filter=lfs diff=lfs merge=lfs -text +data/2025/2504_05xxx/2504.05862/f5f20bac-767d-4260-82e6-943416d1d631_origin.pdf filter=lfs diff=lfs merge=lfs -text +data/2025/2504_05xxx/2504.05888/a802a693-b787-42ea-9506-1d20b2ad6f02_origin.pdf filter=lfs diff=lfs merge=lfs -text +data/2025/2504_06xxx/2504.06122/1f4b320c-f80c-4d99-b14d-3a49e20634f9_origin.pdf filter=lfs diff=lfs merge=lfs -text +data/2025/2504_06xxx/2504.06196/00c24729-2bee-4726-a0b4-13a163fe9cf3_origin.pdf filter=lfs diff=lfs merge=lfs -text diff --git a/data/2025/2504_05xxx/2504.05758/607ad388-a3c2-41a7-ba56-f696cce741af_content_list.json b/data/2025/2504_05xxx/2504.05758/607ad388-a3c2-41a7-ba56-f696cce741af_content_list.json new file mode 100644 index 0000000000000000000000000000000000000000..a8b40fe4c2af49f761c93dac56a037b383d453bd --- /dev/null +++ b/data/2025/2504_05xxx/2504.05758/607ad388-a3c2-41a7-ba56-f696cce741af_content_list.json @@ -0,0 +1,737 @@ +[ + 
{ + "type": "text", + "text": "Addressing Class Imbalance with Probabilistic Graphical Models and Variational Inference", + "text_level": 1, + "bbox": [ + 130, + 69, + 867, + 137 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Yujia Lou \nUniversity of Rochester \nRochester, USA", + "bbox": [ + 130, + 143, + 277, + 190 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Jie Liu \nUniversity of Minnesota \nMinneapolis, USA", + "bbox": [ + 424, + 143, + 573, + 190 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Yuan Sheng Northeastern University Seattle, USA", + "bbox": [ + 722, + 143, + 867, + 184 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Jiawei Wang \nUniversity of California Los Angeles, USA", + "bbox": [ + 130, + 220, + 274, + 263 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Yiwei Zhang Cornell University Ithaca, USA", + "bbox": [ + 441, + 220, + 555, + 262 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Yaokun Ren* Northeastern University Seattle, USA", + "bbox": [ + 722, + 220, + 867, + 263 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Abstract-This study proposes a method for imbalanced data classification based on deep probabilistic graphical models (DPGMs) to solve the problem that traditional methods have insufficient learning ability for minority class samples. To address the classification bias caused by class imbalance, we introduce variational inference optimization probability modeling, which enables the model to adaptively adjust the representation ability of minority classes and combines the class-aware weight adjustment strategy to enhance the classifier's sensitivity to minority classes. In addition, we combine the adversarial learning mechanism to generate minority class samples in the latent space so that the model can better characterize the category boundary in the high-dimensional feature space. 
The experiment is evaluated on the Kaggle \"Credit Card Fraud Detection\" dataset and compared with a variety of advanced imbalanced classification methods (such as GAN-based sampling, BRF, XGBoost-Cost Sensitive, SAAD, HAN). The results show that the method in this study has achieved the best performance in AUC, Precision, Recall and F1-score indicators, effectively improving the recognition rate of minority classes and reducing the false alarm rate. This method can be widely used in imbalanced classification tasks such as financial fraud detection, medical diagnosis, and anomaly detection, providing a new solution for related research.", + "bbox": [ + 66, + 311, + 488, + 612 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Keywords-Deep probabilistic graphical models; Imbalanced data classification; Variational inference; Adversarial learning", + "bbox": [ + 66, + 625, + 478, + 652 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "I. INTRODUCTION", + "text_level": 1, + "bbox": [ + 210, + 661, + 356, + 674 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "In modern data analysis and machine learning research, the issue of data imbalance has emerged as a critical challenge affecting classifier performance. Many real-world classification tasks exhibit highly skewed class distributions, such as rare disease detection in market risk analysis and prediction [1], financial fraud detection [2-4], and cybersecurity anomaly [5] detection. In these scenarios, the number of minority class samples is significantly lower than that of the majority class. Traditional machine learning models often prioritize optimizing overall classification accuracy, leading to a substantially reduced recognition rate for minority class instances. Existing approaches primarily include data-level resampling strategies, algorithm-level cost-sensitive learning, and adaptive training strategies in deep learning. 
However, these methods still face several challenges in practical applications, such as potential noise introduction in resampling, the need for precise loss", + "bbox": [ + 66, + 680, + 486, + 902 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "weight tuning in cost-sensitive learning, and the limited generalization ability of deep learning models on imbalanced data [6]. Consequently, designing more robust and generalizable classification methods to enhance the performance of imbalanced data classification remains a core research problem.", + "bbox": [ + 506, + 311, + 928, + 395 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "In recent years, deep probabilistic graphical models (DPGMs) have gained significant attention in machine learning and pattern recognition due to their powerful representation capabilities and uncertainty modeling. Probabilistic graphical models integrate probabilistic statistical theory with graph-based methods, effectively capturing complex dependencies among variables and modeling data uncertainty [7]. Compared to conventional deep neural networks, probabilistic graphical models offer notable advantages in small-sample learning, data sparsity, and uncertainty reasoning. The emergence of Bayesian deep learning, variational inference, and graph neural networks has further strengthened the applicability of DPGMs in addressing data imbalance challenges [8]. By incorporating probabilistic priors and posterior distributions, these models can more accurately characterize minority class data while effectively accounting for uncertainty in decision-making [9], thereby enhancing the classifier's sensitivity to minority class instances. 
Given this, exploring how to leverage the strengths of deep probabilistic graphical models to develop a more robust imbalanced data classification framework holds both theoretical significance and practical potential.", + "bbox": [ + 506, + 401, + 928, + 693 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "The purpose of this study is to investigate the effectiveness of deep probabilistic graphical models (DPGMs) in imbalanced data classification, introducing a novel strategy that emphasizes improved performance on minority class samples. Unlike heuristic methods, this approach leverages the generative capabilities of DPGMs through adaptive probabilistic modeling and structural learning, capturing richer representations of underrepresented samples. Variational inference and Bayesian optimization further refine model parameters, enhancing classification robustness while expanding theoretical insights into DPGMs. Beyond its methodological contributions, the proposed model has notable practical value. In human-computer interaction and the financial sector, user intent prediction methods [10] and time-series risk prediction strategies [11] further underscore how DPGMs can adapt to", + "bbox": [ + 506, + 699, + 929, + 907 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "diverse data structures and real-time processing requirements. Moreover, the efficient market signal detection approach proposed by Zhou et al. [12] highlights the role of advanced neural architectures in combination with DPGMs for continuous data streams. These complementary research directions illustrate the wide-ranging applicability and potential extensions of deep probabilistic models in various industries and research fields. 
By integrating probabilistic inference and deep learning, this work not only addresses the challenges of imbalanced classification but also broadens the application of deep probabilistic approaches, ultimately enriching the machine learning toolkit for various critical domains.", + "bbox": [ + 66, + 66, + 486, + 233 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "II. METHOD", + "text_level": 1, + "bbox": [ + 230, + 243, + 333, + 256 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Suppose dataset $D = \{(x_{i},y_{i})\}_{i = 1}^{N}$ , where $x_{i}\in R^{d}$ represents input samples and $y_{i}\in \{0,1\}$ represents category labels. Assuming that the ratio of positive and negative samples is seriously unbalanced, that is, $|\{y_i = 1\}| \ll |\{y_i = 0\}|$ , traditional deep learning methods tend to favor the majority class when optimizing the loss function. Therefore, we introduce deep probabilistic graphical models (DPGMs). By constructing a joint probability distribution and incorporating variational inference techniques inspired by Wang [13], the proposed model adaptively enhances the representation capability for minority class samples, leading to improved classification performance. This approach effectively mitigates the challenges posed by class imbalance, thereby ensuring that minority class samples are accurately and adequately represented within the learned feature space. Additionally, leveraging dynamic distributed scheduling methodologies as discussed by Sun [14] enables efficient handling of data streams, optimizing both task delays and load balancing. Such strategies significantly contribute to maintaining computational efficiency and enhancing the real-time responsiveness of the system. Furthermore, the synergistic integration of deep learning methods and neural architecture search techniques outlined by Yan et al. 
[15] further refines the adaptive representational adjustments, ensuring the robustness, accuracy, and generalization capabilities of the proposed classification framework. The architecture of the probabilistic graphical model is shown in Figure 1.", + "bbox": [ + 66, + 263, + 486, + 662 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "First, we define a hidden variable $z$ to model the potential representation of the input data $x$ , and use the Bayesian generation model to describe the data generation process:", + "bbox": [ + 66, + 667, + 486, + 712 + ], + "page_idx": 1 + }, + { + "type": "equation", + "text": "\n$$\np (x, y, z) = p (y \mid z) p (z \mid x) p (x)\n$$\n", + "text_format": "latex", + "bbox": [ + 163, + 719, + 413, + 734 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Among them, $p(y \mid z)$ represents the posterior distribution of the classifier for the latent variable, and $p(z \mid x)$ represents the prior distribution of the latent variable. Based on this, our goal is to optimize the model parameters by maximizing the marginal log-likelihood:", + "bbox": [ + 66, + 744, + 486, + 823 + ], + "page_idx": 1 + }, + { + "type": "equation", + "text": "\n$$\n\log p (y \mid x) = \log \int p (y \mid z) p (z \mid x) d z\n$$\n", + "text_format": "latex", + "bbox": [ + 151, + 829, + 423, + 854 + ], + "page_idx": 1 + }, + { + "type": "image", + "img_path": "images/c8945917346c994832f059f7db5489e15026b3e9e6b1a93768160bb4b7790869.jpg", + "image_caption": [ + "Figure 1. The architecture of the probabilistic graphical model" + ], + "image_footnote": [], + "bbox": [ + 545, + 73, + 893, + 284 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "However, this integral is difficult to compute directly, so we use variational inference to approximate the solution. 
Define a variational distribution $q(z|x)$ to approximate $p(z|x)$ , and optimize the model through the evidence lower bound (ELBO):", + "bbox": [ + 508, + 338, + 928, + 416 + ], + "page_idx": 1 + }, + { + "type": "equation", + "text": "\n$$\n\log p (y \mid x) \geq E _ {q (z \mid x)} [ \log p (y \mid z) ] - D _ {K L} (q (z \mid x) \| p (z))\n$$\n", + "text_format": "latex", + "bbox": [ + 509, + 426, + 931, + 446 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Among them, $D_{KL}(\cdot \| \cdot)$ represents the Kullback-Leibler divergence, which is used to measure the gap between the approximate distribution and the true posterior distribution. In order to further optimize the classification performance of minority classes, we introduce category-aware variational inference and explicitly enhance the weight of minority class samples in the loss function:", + "bbox": [ + 508, + 450, + 928, + 551 + ], + "page_idx": 1 + }, + { + "type": "equation", + "text": "\n$$\nL = \sum_ {i = 1} ^ {N} w (y _ {i}) \left[ E _ {q (z | x _ {i})} [ \log p (y _ {i} \mid z) ] - D _ {K L} (q (z | x _ {i}) \| p (z)) \right]\n$$\n", + "text_format": "latex", + "bbox": [ + 580, + 561, + 854, + 606 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Among them, $w(y_{i})$ is the category weight coefficient, and a higher weight is set for minority class samples, for example:", + "bbox": [ + 508, + 618, + 926, + 652 + ], + "page_idx": 1 + }, + { + "type": "equation", + "text": "\n$$\nw \left(y _ {i}\right) = \frac {N _ {\text {major}}}{N _ {\text {minor}}}\n$$\n", + "text_format": "latex", + "bbox": [ + 658, + 660, + 779, + 700 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Where $N_{\text{major}}$ and $N_{\text{minor}}$ represent the number of samples in the majority class and the minority class, respectively.", + "bbox": [ + 508, + 709, + 926, + 739 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "In the specific implementation, we use variational autoencoder (VAE) as the probability generation 
model, so that $q(z \mid x)$ obeys the normal distribution:", + "bbox": [ + 508, + 744, + 926, + 790 + ], + "page_idx": 1 + }, + { + "type": "equation", + "text": "\n$$\nq (z \mid x) = N (\mu (x), \sigma^ {2} (x))\n$$\n", + "text_format": "latex", + "bbox": [ + 617, + 800, + 820, + 819 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "And optimize it by reparameterization technique:", + "bbox": [ + 509, + 828, + 833, + 843 + ], + "page_idx": 1 + }, + { + "type": "equation", + "text": "\n$$\nz = \mu (x) + \sigma (x) \cdot \varepsilon , \quad \varepsilon \sim \mathrm {N} (0, \mathrm {I})\n$$\n", + "text_format": "latex", + "bbox": [ + 594, + 848, + 841, + 864 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "In this way, the model can learn $\mu(x)$ and $\sigma(x)$ through the neural network to obtain a more stable gradient. In addition, building", + "bbox": [ + 508, + 875, + 926, + 902 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "on previous adversarial learning frameworks [16] and generative design concepts [17], we introduce an adversarial learning mechanism to optimize the category distribution. This mechanism strengthens the model's ability to differentiate minority class samples by ensuring that generated data more closely matches real distributions. Specifically, a discriminator is constructed to distinguish the distribution of generated minority class samples from authentic instances, ensuring closer alignment with observed data. Furthermore, incorporating few-shot learning strategies [18] and dynamic adaptation techniques [19] enhances the model's resilience in limited-data conditions. 
The optimization goal of the discriminator is as follows:", + "bbox": [ + 66, + 66, + 486, + 247 + ], + "page_idx": 2 + }, + { + "type": "equation", + "text": "\n$$\n\min _ {G} \max _ {D} E _ {x \sim p _ {\text {data}} (x)} [ \log D (z) ] +\n$$\n", + "text_format": "latex", + "bbox": [ + 156, + 255, + 393, + 280 + ], + "page_idx": 2 + }, + { + "type": "equation", + "text": "\n$$\nE _ {z \sim q (z | x)} [ \log (1 - D (z)) ]\n$$\n", + "text_format": "latex", + "bbox": [ + 160, + 284, + 331, + 306 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Through this adversarial learning method, the model can capture the characteristics of minority classes more accurately and avoid the overfitting problem of minority class samples.", + "bbox": [ + 66, + 313, + 486, + 357 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "In summary, this study combines deep probabilistic graph models, variational reasoning, and adversarial learning methods to optimize imbalanced data classification tasks.", + "bbox": [ + 66, + 363, + 486, + 405 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "III. EXPERIMENT", + "text_level": 1, + "bbox": [ + 215, + 415, + 346, + 429 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "A. Dataset", + "text_level": 1, + "bbox": [ + 66, + 436, + 155, + 450 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "This study employs the Kaggle \"Credit Card Fraud Detection\" dataset, consisting of 284,807 credit card transactions from a European institution. Of these transactions, 492 are labeled as fraudulent, indicating a highly imbalanced class distribution (approximately 1:577). Each record contains 30 features, including 28 anonymized components derived from Principal Component Analysis (PCA), along with transaction time and amount. 
Personally identifiable information has been removed, leaving only numerical features, which were preprocessed through normalization, outlier detection, and data augmentation.", + "bbox": [ + 66, + 454, + 486, + 608 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Given the severe class imbalance, direct application of conventional classification models often leads to bias toward the majority class, compromising fraud detection. To address this challenge, we employed various sampling strategies, including under-sampling, over-sampling, and the Synthetic Minority Over-sampling Technique (SMOTE), to generate synthetic samples for the minority class and improve representation. We also evaluated the impact of different sampling methods on model stability and performance. The dataset was split into $70\\%$ training, $15\\%$ validation, and $15\\%$ test sets. Evaluation metrics included Precision, Recall, F1-score, and the Area Under the Receiver Operating Characteristic Curve (AUC-ROC). Comparative experiments with different data augmentation techniques demonstrated that integrating probabilistic modeling with these strategies substantially enhances fraud detection and reduces false positives, thereby improving the model's reliability in practical applications.", + "bbox": [ + 66, + 614, + 486, + 864 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "B. Experiment Result", + "text_level": 1, + "bbox": [ + 509, + 68, + 666, + 82 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "This study primarily compares deep probabilistic graphical models (DPGMs) with several advanced imbalanced data classification methods to validate their effectiveness. First, we select the generative adversarial network (GAN)-based methods, such as WGANGP-SMOTE and ADASYN-GAN, which leverage GANs to synthesize minority class samples and mitigate data imbalance. 
Second, we evaluate class-adaptive ensemble learning methods, including Balanced Random Forest (BRF) and XGBoost-Cost Sensitive, which enhance minority class learning by adjusting sampling strategies or modifying loss functions. Additionally, we compare attention-based imbalanced classification methods, such as Self-Attention Anomaly Detection (SAAD) and Hierarchical Attention Networks (HAN), which have demonstrated strong anomaly detection capabilities in credit card fraud detection and similar tasks. Through these comparative experiments, we aim to comprehensively assess the advantages of deep probabilistic graphical models in minority class representation learning, generalization ability, and classification performance.", + "bbox": [ + 506, + 85, + 929, + 377 + ], + "page_idx": 2 + }, + { + "type": "table", + "img_path": "images/ffd677e4e9724b33fd97b088d2b56b347e397c7cd6d17964076bb5ea1b746ab4.jpg", + "table_caption": [ + "Table 1. Integration Testing 1" + ], + "table_footnote": [], + "table_body": "
ModelAUCPrecisionRecallF1-Score
GAN [20]0.8420.7160.6540.684
ADASYN [21]0.8560.7290.6680.697
SMOTE [22]0.8710.7420.6830.711
BRF [23]0.8890.7640.7210.742
XGBOOST-Cost [24]0.9030.7790.7350.757
SAAD [25]0.9150.7930.7510.771
HAN [26]0.9270.8060.7680.786
Ours0.9410.8220.7850.803
", + "bbox": [ + 509, + 404, + 929, + 527 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Our proposed deep probabilistic graphical model (DPGM) outperforms all compared methods on every evaluation metric, demonstrating superior generalization in imbalanced classification. With an AUC of 0.941, it clearly surpasses traditional oversampling (e.g., SMOTE, ADASYN) and ensemble methods (e.g., BRF, XGBoost-Cost Sensitive). Importantly, it achieves a Recall of 0.785 while maintaining a Precision of 0.822, reflecting its effectiveness in detecting minority class samples without overly biasing the model.", + "bbox": [ + 506, + 529, + 928, + 654 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "DPGMs model the latent distribution of minority samples more effectively than conventional oversampling, thereby reducing overfitting. Compared to attention-based methods (e.g., SAAD, HAN), our model delivers a higher F1-score (0.803 versus 0.786), illustrating the benefits of uncertainty-aware probabilistic modeling. Overall, these results confirm that combining deep probabilistic modeling with variational inference optimizes class distribution and enhances minority class discrimination, offering a robust solution for imbalanced data classification. Figure 2 presents the corresponding loss function trajectory.", + "bbox": [ + 506, + 659, + 929, + 813 + ], + "page_idx": 2 + }, + { + "type": "image", + "img_path": "images/e739f93a7fbde9cf2a99b06d3897132dc8dbed0714b55865b79655a88af9e655.jpg", + "image_caption": [ + "Figure 2. Loss function drop graph" + ], + "image_footnote": [], + "bbox": [ + 83, + 80, + 470, + 263 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "From the loss function decline curve, both the training loss (Train) and test loss (Test) exhibit a clear downward trend during training iterations. This indicates that the model continuously learns features and optimizes parameters to effectively reduce errors. 
In the initial phase of training (between 5,000 and 25,000 iterations), the loss decreases at the fastest rate, suggesting that the model rapidly learns data representations and significantly improves classification performance. However, as the number of iterations increases, the rate of loss reduction gradually slows down and stabilizes after approximately 125,000 iterations. This trend implies that the model is approaching convergence, where further optimization yields diminishing returns.", + "bbox": [ + 70, + 306, + 485, + 487 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "A comparison of the training and test loss curves reveals that the test loss consistently remains lower than the training loss, and both curves follow a similar trajectory. This observation suggests that the model demonstrates good generalization ability without exhibiting significant overfitting. If the training loss were substantially lower than the test loss, it would indicate that the model performs well on training data but struggles to generalize to unseen data. However, the current loss curves do not display such a pattern, implying that the applied regularization strategies and optimization methods effectively mitigate overfitting. Furthermore, the test loss decreases at a rate similar to that of the training loss in the initial stages, further validating the model's stable learning process. Overall, these experimental results confirm that the model successfully optimizes the loss function during training, leading to a substantial reduction in both training and test errors ultimately reaching a relatively low level. This outcome suggests that the chosen training strategy, hyperparameter configuration, and optimization techniques are effective, allowing the model to learn the data distribution efficiently while maintaining strong generalization performance. 
Additionally, the stabilization of the loss curves indicates that the training process has effectively converged, suggesting that training can be halted or fine-tuned further to ensure optimal performance on the test set. Finally, this paper also gives the T-SNE results after training, as shown in Figure 3.", + "bbox": [ + 70, + 494, + 485, + 852 + ], + "page_idx": 3 + }, + { + "type": "image", + "img_path": "images/6ad69f33204c10e92da4741df3cc69002914239055daa6956253b03c994af45e.jpg", + "image_caption": [ + "Figure 3. T-SNE result map after training" + ], + "image_footnote": [], + "bbox": [ + 517, + 69, + 947, + 349 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "From the T-SNE results, it is evident that after training, the data points form distinct cluster-like distributions in the two-dimensional space, indicating that the model has successfully learned the feature differences between different classes. As observed in the visualization, the two categories (represented in blue and red) are well separated, suggesting that the model has developed strong discriminative capabilities in the high-dimensional feature space. The presence of a clear boundary between the classes demonstrates that the model effectively extracts distinguishing features without causing sample overlap, thereby validating its effectiveness.", + "bbox": [ + 513, + 381, + 926, + 534 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Furthermore, the overall data distribution demonstrates that the T-SNE dimensionality reduction retains intra-class compactness while ensuring inter-class separability. The blue and red data points are well-clustered in distinct regions without significant overlap, indicating that the model effectively distinguishes between different categories in the feature space. 
Even when dealing with an imbalanced dataset, the model successfully learns the distribution patterns of the minority class.", + "bbox": [ + 513, + 541, + 926, + 666 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "However, while the T-SNE results illustrate a clear class separation, further quantitative evaluation is necessary to assess the robustness of the classification boundaries. For instance, if significant distribution shifts occur in certain test data samples, it may indicate that the model is still susceptible to overfitting. Additionally, since T-SNE is a nonlinear dimensionality reduction method, it may exaggerate the separation between classes, meaning that the actual decision boundaries in the high-dimensional space may not be as well-defined as they appear in the visualization. Therefore, a comprehensive evaluation incorporating classification metrics such as Precision, Recall, and AUC is essential to fully validate the model's generalization performance.", + "bbox": [ + 513, + 672, + 926, + 852 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "IV. CONCLUSION", + "text_level": 1, + "bbox": [ + 215, + 68, + 346, + 80 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "This study proposes an imbalanced data classification method based on deep probabilistic graphical models (DPGMs) and validates its effectiveness through experiments on a credit card fraud detection dataset. The experimental results demonstrate that the proposed method outperforms traditional oversampling techniques, ensemble learning approaches, and attention-based models in key metrics such as AUC and F1-score, confirming the effectiveness of probabilistic modeling in handling imbalanced classification tasks. 
By integrating variational inference, class-weight adjustment, and adversarial learning mechanisms, our model more accurately captures the feature distribution of the minority class, enhancing the classifier's discriminative ability while mitigating the overfitting issues commonly observed in traditional methods.", + "bbox": [ + 66, + 87, + 486, + 282 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Despite the promising performance of our approach in imbalanced data classification, several aspects warrant further improvement. For instance, in cases of extreme imbalance, the minority class samples may still provide insufficient information, potentially limiting the model's generalization capability. Additionally, deep probabilistic graphical models involve high computational complexity, requiring extensive sampling and variational inference steps during training, which may impact deployment efficiency. Therefore, future research could focus on optimizing the computational efficiency of probabilistic modeling to enhance the model's adaptability across different data distributions. Several directions can be explored in future research. More efficient Bayesian optimization methods can be investigated to reduce the computational cost of DPGMs, making them applicable to larger-scale imbalanced datasets. Furthermore, in practical applications, federated learning frameworks can be incorporated to enable cross-institutional model training while preserving data privacy, thereby enhancing the applicability of imbalanced classification methods in real-world scenarios.", + "bbox": [ + 66, + 287, + 486, + 565 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "REFERENCES", + "text_level": 1, + "bbox": [ + 232, + 574, + 320, + 587 + ], + "page_idx": 4 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[1] Y. Cheng, Z. Xu, Y. Chen, Y. Wang, Z. Lin and J. 
Liu, \"A Deep Learning Framework Integrating CNN and BiLSTM for Financial Systemic Risk Analysis and Prediction,\" arXiv preprint arXiv:2502.06847, 2025.", + "[2] J. Liu, \"Multimodal Data-Driven Factor Models for Stock Market Forecasting,\" Journal of Computer Technology and Software, vol. 4, no. 2, 2025, https://doi.org/10.5281/zenodo.14984969.", + "[3] Y. Deng, \"A Hybrid Network Congestion Prediction Method Integrating Association Rules and LSTM for Enhanced Spatiotemporal Forecasting,\" Transactions on Computational and Scientific Methods, vol. 5, no. 2, 2025, https://doi.org/10.5281/zenodo.14912727.", + "[4] P. Feng, \"Hybrid BiLSTM-Transformer Model for Identifying Fraudulent Transactions in Financial Systems,\" Journal of Computer Science and Software Applications, vol. 5, no. 3, 2025, https://doi.org/10.5281/zenodo.14985026.", + "[5] X. Du, \"Optimized Convolutional Neural Network for Intelligent Financial Statement Anomaly Detection,\" Journal of Computer Technology and Software, vol. 3, no. 9, pp. 11-15, 2024.", + "[6] S. Moolchandani, \"Advancing Credit Risk Management: Embracing Probabilistic Graphical Models in Banking,\" International Journal of Science and Research (IJSR), vol. 13, no. 6, pp. 74-80, 2024.", + "[7] S. Arya, T. Rahman and V. Gogate, \"Learning to Solve the Constrained Most Probable Explanation Task in Probabilistic Graphical Models,\" Proceedings of the 2024 International Conference on Artificial Intelligence and Statistics (AISTATS), PMLR, pp. 2791-2799, 2024." + ], + "bbox": [ + 68, + 592, + 486, + 897 + ], + "page_idx": 4 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[8] J. Hu, T. An, Z. Yu, J. Du and Y. Luo, \"Contrastive Learning for Cold Start Recommendation with Adaptive Feature Fusion,\" arXiv preprint arXiv:2502.03664, 2025.", + "[9] J. Zhan, \"Elastic Scheduling of Micro-Modules in Edge Computing Based on LSTM Prediction,\" Journal of Computer Technology and Software, vol. 4, no. 
2, 2025, https://doi.org/10.5281/zenodo.14984949.", + "[10] Q. Sun and S. Duan, \"User Intent Prediction and Response in Human-Computer Interaction via BiLSTM,\" Journal of Computer Science and Software Applications, vol. 5, no. 3, 2025, https://doi.org/10.5281/zenodo.14985042.", + "[11] Y. Wang, \"Time-Series Premium Risk Prediction via Bidirectional Transformer,\" Transactions on Computational and Scientific Methods, vol. 5, no. 2, 2025, https://doi.org/10.5281/zenodo.14955913.", + "[12] T. Zhou, Z. Xu and J. Du, \"Efficient Market Signal Prediction for Blockchain HFT with Temporal Convolutional Networks,\" Transactions on Computational and Scientific Methods, vol. 5, no. 2, 2025, https://doi.org/10.5281/zenodo.14912719.", + "[13] X. Wang, \"Data Mining Framework Leveraging Stable Diffusion: A Unified Approach for Classification and Anomaly Detection,\" Journal of Computer Technology and Software, vol. 4, no. 1, 2025, https://doi.org/10.5281/zenodo.14843181.", + "[14] X. Sun, \"Dynamic Distributed Scheduling for Data Stream Computing: Balancing Task Delay and Load Efficiency\", Journal of Computer Technology and Software, vol. 4, no. 1, 2025, https://doi.org/10.5281/zenodo.14785261.", + "[15] X. Yan, J. Du, L. Wang, Y. Liang, J. Hu and B. Wang, \"The Synergistic Role of Deep Learning and Neural Architecture Search in Advancing Artificial Intelligence,\" Proceedings of the 2024 International Conference on Electronics and Devices, Computational Science (ICEDCS), pp. 452-456, Sep. 2024.", + "[16] P. Li, \"Improved Transformer for Cross-Domain Knowledge Extraction with Feature Alignment,\" Journal of Computer Science and Software Applications, vol. 5, no. 2, 2024.", + "[17] S. Wang, R. Zhang and X. Shi, \"Generative UI Design with Diffusion Models: Exploring Automated Interface Creation and Human-Computer Interaction,\" Transactions on Computational and Scientific Methods, vol. 5, no. 3, 2025.", + "[18] J. Gao, S. Lyu, G. Liu, B. Zhu, H. Zheng and X. 
Liao, \"A Hybrid Model for Few-Shot Text Classification Using Transfer and Meta-Learning,\" arXiv preprint arXiv:2502.09086, 2025.", + "[19] Y. Yao, \"Time-Series Nested Reinforcement Learning for Dynamic Risk Control in Nonlinear Financial Markets,\" Transactions on Computational and Scientific Methods, vol. 5, no. 1, 2025, https://doi.org/10.5281/zenodo.14677117.", + "[20] I. Goodfellow, J. Pouget-Abadie, M. Mirza, B. Xu, D. Warde-Farley, S. Ozair, A. Courville and Y. Bengio, \"Generative Adversarial Nets,\" Proceedings of the 27th Advances in Neural Information Processing Systems (NeurIPS), pp. 1–9, 2014.", + "[21] H. He and Y. Bai, \"ADASYN: Adaptive Synthetic Sampling Approach for Imbalanced Learning,\" Proceedings of the IEEE International Joint Conference on Neural Networks (IEEE World Congress on Computational Intelligence), pp. 1322-1328, 2008.", + "[22] N. V. Chawla, K. W. Bowyer, L. O. Hall and W. P. Kegelmeyer, \"SMOTE: Synthetic Minority Over-Sampling Technique,\" Journal of Artificial Intelligence Research, vol. 16, pp. 321-357, 2002.", + "[23] A. Liaw and M. Wiener, \"Classification and Regression by randomForest,\" R News, vol. 2, no. 3, pp. 18-22, 2002.", + "[24] T. Chen and C. Guestrin, \"XGBoost: A Scalable Tree Boosting System,\" Proceedings of the 22nd ACM SIGKDD International Conference on Knowledge Discovery and Data Mining, pp. 785-794, 2016.", + "[25] Y. Zhou and R. C. Paffenroth, \"Self-Attention Anomaly Detection,\" Proceedings of the 25th ACM SIGKDD International Conference on Knowledge Discovery and Data Mining, pp. 2774-2782, 2019.", + "[26] Z. Yang, D. Yang, C. Dyer, X. He, A. Smola and E. Hovy, \"Hierarchical Attention Networks for Document Classification,\" Proceedings of the 2016 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, pp. 1480-1489, 2016." 
+ ], + "bbox": [ + 511, + 66, + 928, + 897 + ], + "page_idx": 4 + } +] \ No newline at end of file diff --git a/data/2025/2504_05xxx/2504.05758/607ad388-a3c2-41a7-ba56-f696cce741af_model.json b/data/2025/2504_05xxx/2504.05758/607ad388-a3c2-41a7-ba56-f696cce741af_model.json new file mode 100644 index 0000000000000000000000000000000000000000..318d2061a14a2b219dd13c98f1e6d7f65f547bc2 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05758/607ad388-a3c2-41a7-ba56-f696cce741af_model.json @@ -0,0 +1,1013 @@ +[ + [ + { + "type": "title", + "bbox": [ + 0.131, + 0.07, + 0.868, + 0.138 + ], + "angle": 0, + "content": "Addressing Class Imbalance with Probabilistic Graphical Models and Variational Inference" + }, + { + "type": "text", + "bbox": [ + 0.132, + 0.145, + 0.278, + 0.191 + ], + "angle": 0, + "content": "Yujia Lou \nUniversity of Rochester \nRochester, USA" + }, + { + "type": "text", + "bbox": [ + 0.426, + 0.145, + 0.574, + 0.191 + ], + "angle": 0, + "content": "Jie Liu \nUniversity of Minnesota \nMinneapolis, USA" + }, + { + "type": "text", + "bbox": [ + 0.723, + 0.145, + 0.869, + 0.185 + ], + "angle": 0, + "content": "Yuan Sheng Northeastern University Seattle, USA" + }, + { + "type": "text", + "bbox": [ + 0.131, + 0.221, + 0.276, + 0.265 + ], + "angle": 0, + "content": "Jiawei Wang \nUniversity of California Los Angeles, USA" + }, + { + "type": "text", + "bbox": [ + 0.442, + 0.221, + 0.556, + 0.263 + ], + "angle": 0, + "content": "Yiwei Zhang Cornell University Ithaca, USA" + }, + { + "type": "text", + "bbox": [ + 0.723, + 0.221, + 0.869, + 0.265 + ], + "angle": 0, + "content": "Yaokun Ren* Northeastern University Seattle, USA" + }, + { + "type": "text", + "bbox": [ + 0.067, + 0.312, + 0.489, + 0.613 + ], + "angle": 0, + "content": "Abstract-This study proposes a method for imbalanced data classification based on deep probabilistic graphical models (DPGMs) to solve the problem that traditional methods have insufficient learning ability for minority class samples. 
To address the classification bias caused by class imbalance, we introduce variational inference optimization probability modeling, which enables the model to adaptively adjust the representation ability of minority classes and combines the class-aware weight adjustment strategy to enhance the classifier's sensitivity to minority classes. In addition, we combine the adversarial learning mechanism to generate minority class samples in the latent space so that the model can better characterize the category boundary in the high-dimensional feature space. The experiment is evaluated on the Kaggle \"Credit Card Fraud Detection\" dataset and compared with a variety of advanced imbalanced classification methods (such as GAN-based sampling, BRF, XGBoost-Cost Sensitive, SAAD, HAN). The results show that the method in this study has achieved the best performance in AUC, Precision, Recall and F1-score indicators, effectively improving the recognition rate of minority classes and reducing the false alarm rate. This method can be widely used in imbalanced classification tasks such as financial fraud detection, medical diagnosis, and anomaly detection, providing a new solution for related research." + }, + { + "type": "text", + "bbox": [ + 0.068, + 0.625, + 0.48, + 0.654 + ], + "angle": 0, + "content": "Keywords-Deep probabilistic graphical models; Imbalanced data classification; Variational inference; Adversarial learning" + }, + { + "type": "title", + "bbox": [ + 0.211, + 0.662, + 0.357, + 0.675 + ], + "angle": 0, + "content": "I. INTRODUCTION" + }, + { + "type": "text", + "bbox": [ + 0.067, + 0.681, + 0.487, + 0.904 + ], + "angle": 0, + "content": "In modern data analysis and machine learning research, the issue of data imbalance has emerged as a critical challenge affecting classifier performance. 
Many real-world classification tasks exhibit highly skewed class distributions, such as rare disease detection in market risk analysis and prediction [1], financial fraud detection [2-4], and cybersecurity anomaly [5] detection. In these scenarios, the number of minority class samples is significantly lower than that of the majority class. Traditional machine learning models often prioritize optimizing overall classification accuracy, leading to a substantially reduced recognition rate for minority class instances. Existing approaches primarily include data-level resampling strategies, algorithm-level cost-sensitive learning, and adaptive training strategies in deep learning. However, these methods still face several challenges in practical applications, such as potential noise introduction in resampling, the need for precise loss" + }, + { + "type": "text", + "bbox": [ + 0.508, + 0.312, + 0.929, + 0.396 + ], + "angle": 0, + "content": "weight tuning in cost-sensitive learning, and the limited generalization ability of deep learning models on imbalanced data [6]. Consequently, designing more robust and generalizable classification methods to enhance the performance of imbalanced data classification remains a core research problem." + }, + { + "type": "text", + "bbox": [ + 0.508, + 0.402, + 0.929, + 0.694 + ], + "angle": 0, + "content": "In recent years, deep probabilistic graphical models (DPGMs) have gained significant attention in machine learning and pattern recognition due to their powerful representation capabilities and uncertainty modeling. Probabilistic graphical models integrate probabilistic statistical theory with graph-based methods, effectively capturing complex dependencies among variables and modeling data uncertainty [7]. Compared to conventional deep neural networks, probabilistic graphical models offer notable advantages in small-sample learning, data sparsity, and uncertainty reasoning. 
The emergence of Bayesian deep learning, variational inference, and graph neural networks has further strengthened the applicability of DPGMs in addressing data imbalance challenges [8]. By incorporating probabilistic priors and posterior distributions, these models can more accurately characterize minority class data while effectively accounting for uncertainty in decision-making [9], thereby enhancing the classifier's sensitivity to minority class instances. Given this, exploring how to leverage the strengths of deep probabilistic graphical models to develop a more robust imbalanced data classification framework holds both theoretical significance and practical potential." + }, + { + "type": "text", + "bbox": [ + 0.508, + 0.7, + 0.93, + 0.908 + ], + "angle": 0, + "content": "The purpose of this study is to investigate the effectiveness of deep probabilistic graphical models (DPGMs) in imbalanced data classification, introducing a novel strategy that emphasizes improved performance on minority class samples. Unlike heuristic methods, this approach leverages the generative capabilities of DPGMs through adaptive probabilistic modeling and structural learning, capturing richer representations of underrepresented samples. Variational inference and Bayesian optimization further refine model parameters, enhancing classification robustness while expanding theoretical insights into DPGMs. Beyond its methodological contributions, the proposed model has notable practical value. In human-computer interaction and the financial sector, user intent prediction methods [10] and time-series risk prediction strategies [11] further underscore how DPGMs can adapt to" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.068, + 0.068, + 0.487, + 0.234 + ], + "angle": 0, + "content": "diverse data structures and real-time processing requirements. Moreover, the efficient market signal detection approach proposed by Zhou et al. 
[12] highlights the role of advanced neural architectures in combination with DPGMs for continuous data streams. These complementary research directions illustrate the wide-ranging applicability and potential extensions of deep probabilistic models in various industries and research fields. By integrating probabilistic inference and deep learning, this work not only addresses the challenges of imbalanced classification but also broadens the application of deep probabilistic approaches, ultimately enriching the machine learning toolkit for various critical domains." + }, + { + "type": "title", + "bbox": [ + 0.232, + 0.244, + 0.334, + 0.257 + ], + "angle": 0, + "content": "II. METHOD" + }, + { + "type": "text", + "bbox": [ + 0.068, + 0.265, + 0.487, + 0.664 + ], + "angle": 0, + "content": "Suppose dataset \\( D = \\{(x_{i},y_{i})\\}_{i = 1}^{N} \\), where \\( x_{i}\\in R^{d} \\) represents input samples and \\( y_{i}\\in \\{0,1\\} \\) represents category labels. Assuming that the ratio of positive and negative samples is seriously unbalanced, that is, \\( |\\{yi = 1\\} | < < |\\{y_i = 0\\} | \\), traditional deep learning methods tend to favor the majority class when optimizing the loss function. Therefore, we introduce deep probabilistic graphical models (DPGMs). By constructing a joint probability distribution and incorporating variational inference techniques inspired by Wang [13], the proposed model adaptively enhances the representation capability for minority class samples, leading to improved classification performance. This approach effectively mitigates the challenges posed by class imbalance, thereby ensuring that minority class samples are accurately and adequately represented within the learned feature space. Additionally, leveraging dynamic distributed scheduling methodologies as discussed by Sun [14] enables efficient handling of data streams, optimizing both task delays and load balancing. 
Such strategies significantly contribute to maintaining computational efficiency and enhancing the real-time responsiveness of the system. Furthermore, the synergistic integration of deep learning methods and neural architecture search techniques outlined by Yan et al. [15] further refines the adaptive representational adjustments, ensuring the robustness, accuracy, and generalization capabilities of the proposed classification framework. The architecture of the probabilistic graphical model is shown in Figure 1." + }, + { + "type": "text", + "bbox": [ + 0.068, + 0.669, + 0.487, + 0.713 + ], + "angle": 0, + "content": "First, we define a hidden variable \\( z \\) to model the potential representation of the input data \\( x \\), and use the Bayesian generation model to describe the data generation process:" + }, + { + "type": "equation", + "bbox": [ + 0.165, + 0.72, + 0.414, + 0.736 + ], + "angle": 0, + "content": "\\[\np (x, y, z) = p (y \\mid z) p (z \\mid x) p (x)\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.068, + 0.745, + 0.488, + 0.824 + ], + "angle": 0, + "content": "Among them, \\( p(y \\mid z) \\) represents the posterior distribution of the classifier for the latent variable, and \\( p(z \\mid x) \\) represents the prior distribution of the latent variable. Based on this, our goal is to optimize the model parameters by maximizing the marginal log-likelihood:" + }, + { + "type": "equation", + "bbox": [ + 0.153, + 0.83, + 0.424, + 0.855 + ], + "angle": 0, + "content": "\\[\n\\log (y \\mid x) = \\log \\int p (y \\mid z) p (z \\mid x) d z\n\\]" + }, + { + "type": "image", + "bbox": [ + 0.547, + 0.074, + 0.895, + 0.285 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.548, + 0.304, + 0.912, + 0.331 + ], + "angle": 0, + "content": "Figure 1. 
The architecture of the probabilistic graphical model" + }, + { + "type": "text", + "bbox": [ + 0.509, + 0.339, + 0.929, + 0.417 + ], + "angle": 0, + "content": "However, this integral is difficult to compute directly, so we use variational inference to approximate the solution. Define a variational distribution \\( q(z|x) \\) to approximate \\( p(z|x) \\), and optimize the model through the evidence lower bound (ELBO):" + }, + { + "type": "equation", + "bbox": [ + 0.511, + 0.427, + 0.933, + 0.447 + ], + "angle": 0, + "content": "\\[\n\\log p (y \\mid x) \\geq E _ {q (z \\mid x)} [ \\log p (y \\mid z) ] - D _ {K L} (q (z \\mid x) \\| p (z))\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.509, + 0.452, + 0.929, + 0.553 + ], + "angle": 0, + "content": "Among them, \\( D_{KL}(\\cdot \\| \\cdot) \\) represents the Kullback-Leibler divergence, which is used to measure the gap between the approximate distribution and the true posterior distribution. In order to further optimize the classification performance of minority classes, we introduce category-aware variational inference and explicitly enhance the weight of minority class samples in the loss function:" + }, + { + "type": "equation", + "bbox": [ + 0.581, + 0.563, + 0.856, + 0.607 + ], + "angle": 0, + "content": "\\[\nL = \\sum_ {i = 1} ^ {N} w (y _ {i}) \\left[ E _ {q (z | x _ {i})} [ \\log p (y _ {i} \\mid z) ] - \\right.\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.509, + 0.619, + 0.928, + 0.653 + ], + "angle": 0, + "content": "Among them, \\( w(y_{i}) \\) is the category weight coefficient, and a higher weight is set for minority class samples, for example:" + }, + { + "type": "equation", + "bbox": [ + 0.659, + 0.661, + 0.78, + 0.702 + ], + "angle": 0, + "content": "\\[\nw \\left(y _ {i}\\right) = \\frac {N _ {\\text {m a j o r}}}{N _ {\\text {m i n o r}}}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.509, + 0.71, + 0.928, + 0.74 + ], + "angle": 0, + "content": "Where A and B represent the number of 
samples in the majority class and the minority class, respectively." + }, + { + "type": "text", + "bbox": [ + 0.509, + 0.745, + 0.928, + 0.791 + ], + "angle": 0, + "content": "In the specific implementation, we use variational autoencoder (VAE) as the probability generation model, so that \\( p(z \\mid x) \\) obeys the normal distribution:" + }, + { + "type": "equation", + "bbox": [ + 0.618, + 0.801, + 0.821, + 0.82 + ], + "angle": 0, + "content": "\\[\nq (z \\mid x) = N (\\mu (x), \\sigma^ {2} (x))\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.51, + 0.829, + 0.834, + 0.844 + ], + "angle": 0, + "content": "And optimize it by reparameterization technique:" + }, + { + "type": "equation", + "bbox": [ + 0.596, + 0.849, + 0.843, + 0.866 + ], + "angle": 0, + "content": "\\[\nz = \\mu (x) + \\sigma (x) \\cdot \\varepsilon , \\quad \\varepsilon \\sim \\mathrm {N} (0, \\mathrm {I})\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.509, + 0.875, + 0.928, + 0.904 + ], + "angle": 0, + "content": "In this way, the model can learn A and B through the neural network to obtain a more stable gradient. In addition, building" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.067, + 0.068, + 0.487, + 0.248 + ], + "angle": 0, + "content": "on previous adversarial learning frameworks [16] and generative design concepts [17], we introduce an adversarial learning mechanism to optimize the category distribution. This mechanism strengthens the model's ability to differentiate minority class samples by ensuring that generated data more closely matches real distributions. Specifically, a discriminator is constructed to distinguish the distribution of generated minority class samples from authentic instances, ensuring closer alignment with observed data. Furthermore, incorporating few-shot learning strategies [18] and dynamic adaptation techniques [19] enhances the model's resilience in limited-data conditions. 
The optimization goal of the discriminator is as follows:" + }, + { + "type": "equation", + "bbox": [ + 0.157, + 0.256, + 0.395, + 0.281 + ], + "angle": 0, + "content": "\\[\n\\min _ {G} \\max _ {D} E _ {x \\sim p _ {\\text {d a t a}} (x)} [ \\log D (z) ] +\n\\]" + }, + { + "type": "equation", + "bbox": [ + 0.161, + 0.285, + 0.332, + 0.307 + ], + "angle": 0, + "content": "\\[\nE _ {x \\sim q _ {(z | x)}} [ \\log (1 - D (z)) ]\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.068, + 0.314, + 0.487, + 0.358 + ], + "angle": 0, + "content": "Through this adversarial learning method, the model can capture the characteristics of minority classes more accurately and avoid the overfitting problem of minority class samples." + }, + { + "type": "text", + "bbox": [ + 0.068, + 0.364, + 0.487, + 0.406 + ], + "angle": 0, + "content": "In summary, this study combines deep probabilistic graph models, variational reasoning, and adversarial learning methods to optimize imbalanced data classification tasks." + }, + { + "type": "title", + "bbox": [ + 0.217, + 0.416, + 0.347, + 0.43 + ], + "angle": 0, + "content": "III. EXPERIMENT" + }, + { + "type": "title", + "bbox": [ + 0.068, + 0.438, + 0.156, + 0.451 + ], + "angle": 0, + "content": "A. Dataset" + }, + { + "type": "text", + "bbox": [ + 0.067, + 0.455, + 0.488, + 0.609 + ], + "angle": 0, + "content": "This study employs the Kaggle \"Credit Card Fraud Detection\" dataset, consisting of 284,807 credit card transactions from a European institution. Of these transactions, 492 are labeled as fraudulent, indicating a highly imbalanced class distribution (approximately 1:577). Each record contains 30 features, including 28 anonymized components derived from Principal Component Analysis (PCA), along with transaction time and amount. Personally identifiable information has been removed, leaving only numerical features, which were preprocessed through normalization, outlier detection, and data augmentation." 
+ }, + { + "type": "text", + "bbox": [ + 0.067, + 0.615, + 0.487, + 0.865 + ], + "angle": 0, + "content": "Given the severe class imbalance, direct application of conventional classification models often leads to bias toward the majority class, compromising fraud detection. To address this challenge, we employed various sampling strategies, including under-sampling, over-sampling, and the Synthetic Minority Over-sampling Technique (SMOTE), to generate synthetic samples for the minority class and improve representation. We also evaluated the impact of different sampling methods on model stability and performance. The dataset was split into \\(70\\%\\) training, \\(15\\%\\) validation, and \\(15\\%\\) test sets. Evaluation metrics included Precision, Recall, F1-score, and the Area Under the Receiver Operating Characteristic Curve (AUC-ROC). Comparative experiments with different data augmentation techniques demonstrated that integrating probabilistic modeling with these strategies substantially enhances fraud detection and reduces false positives, thereby improving the model's reliability in practical applications." + }, + { + "type": "title", + "bbox": [ + 0.51, + 0.069, + 0.667, + 0.083 + ], + "angle": 0, + "content": "B. Experiment Result" + }, + { + "type": "text", + "bbox": [ + 0.508, + 0.086, + 0.93, + 0.378 + ], + "angle": 0, + "content": "This study primarily compares deep probabilistic graphical models (DPGMs) with several advanced imbalanced data classification methods to validate their effectiveness. First, we select the generative adversarial network (GAN)-based methods, such as WGANGP-SMOTE and ADASYN-GAN, which leverage GANs to synthesize minority class samples and mitigate data imbalance. Second, we evaluate class-adaptive ensemble learning methods, including Balanced Random Forest (BRF) and XGBoost-Cost Sensitive, which enhance minority class learning by adjusting sampling strategies or modifying loss functions. 
Additionally, we compare attention-based imbalanced classification methods, such as Self-Attention Anomaly Detection (SAAD) and Hierarchical Attention Networks (HAN), which have demonstrated strong anomaly detection capabilities in credit card fraud detection and similar tasks. Through these comparative experiments, we aim to comprehensively assess the advantages of deep probabilistic graphical models in minority class representation learning, generalization ability, and classification performance." + }, + { + "type": "table_caption", + "bbox": [ + 0.619, + 0.392, + 0.819, + 0.405 + ], + "angle": 0, + "content": "Table 1. Integration Testing 1" + }, + { + "type": "table", + "bbox": [ + 0.51, + 0.405, + 0.93, + 0.528 + ], + "angle": 0, + "content": "
ModelAUCPrecisionRecallF1-Score
GAN [20]0.8420.7160.6540.684
ADASYN [21]0.8560.7290.6680.697
SMOTE [22]0.8710.7420.6830.711
BRF [23]0.8890.7640.7210.742
XGBOOST-Cost [24]0.9030.7790.7350.757
SAAD [25]0.9150.7930.7510.771
HAN [26]0.9270.8060.7680.786
Ours0.9410.8220.7850.803
" + }, + { + "type": "text", + "bbox": [ + 0.508, + 0.53, + 0.929, + 0.655 + ], + "angle": 0, + "content": "Our proposed deep probabilistic graphical model (DPGM) outperforms all compared methods on every evaluation metric, demonstrating superior generalization in imbalanced classification. With an AUC of 0.941, it clearly surpasses traditional oversampling (e.g., SMOTE, ADASYN) and ensemble methods (e.g., BRF, XGBoost-Cost Sensitive). Importantly, it achieves a Recall of 0.785 while maintaining a Precision of 0.822, reflecting its effectiveness in detecting minority class samples without overly biasing the model." + }, + { + "type": "text", + "bbox": [ + 0.508, + 0.66, + 0.93, + 0.814 + ], + "angle": 0, + "content": "DPGMs model the latent distribution of minority samples more effectively than conventional oversampling, thereby reducing overfitting. Compared to attention-based methods (e.g., SAAD, HAN), our model delivers a higher F1-score (0.803 versus 0.786), illustrating the benefits of uncertainty-aware probabilistic modeling. Overall, these results confirm that combining deep probabilistic modeling with variational inference optimizes class distribution and enhances minority class discrimination, offering a robust solution for imbalanced data classification. Figure 2 presents the corresponding loss function trajectory." + } + ], + [ + { + "type": "image", + "bbox": [ + 0.084, + 0.082, + 0.471, + 0.265 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.177, + 0.286, + 0.404, + 0.301 + ], + "angle": 0, + "content": "Figure 2. Loss function drop graph" + }, + { + "type": "text", + "bbox": [ + 0.071, + 0.308, + 0.486, + 0.488 + ], + "angle": 0, + "content": "From the loss function decline curve, both the training loss (Train) and test loss (Test) exhibit a clear downward trend during training iterations. This indicates that the model continuously learns features and optimizes parameters to effectively reduce errors. 
In the initial phase of training (between 5,000 and 25,000 iterations), the loss decreases at the fastest rate, suggesting that the model rapidly learns data representations and significantly improves classification performance. However, as the number of iterations increases, the rate of loss reduction gradually slows down and stabilizes after approximately 125,000 iterations. This trend implies that the model is approaching convergence, where further optimization yields diminishing returns." + }, + { + "type": "text", + "bbox": [ + 0.071, + 0.495, + 0.486, + 0.853 + ], + "angle": 0, + "content": "A comparison of the training and test loss curves reveals that the test loss consistently remains lower than the training loss, and both curves follow a similar trajectory. This observation suggests that the model demonstrates good generalization ability without exhibiting significant overfitting. If the training loss were substantially lower than the test loss, it would indicate that the model performs well on training data but struggles to generalize to unseen data. However, the current loss curves do not display such a pattern, implying that the applied regularization strategies and optimization methods effectively mitigate overfitting. Furthermore, the test loss decreases at a rate similar to that of the training loss in the initial stages, further validating the model's stable learning process. Overall, these experimental results confirm that the model successfully optimizes the loss function during training, leading to a substantial reduction in both training and test errors ultimately reaching a relatively low level. This outcome suggests that the chosen training strategy, hyperparameter configuration, and optimization techniques are effective, allowing the model to learn the data distribution efficiently while maintaining strong generalization performance. 
Additionally, the stabilization of the loss curves indicates that the training process has effectively converged, suggesting that training can be halted or fine-tuned further to ensure optimal performance on the test set. Finally, this paper also gives the T-SNE results after training, as shown in Figure 3." + }, + { + "type": "image", + "bbox": [ + 0.518, + 0.07, + 0.949, + 0.35 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.586, + 0.362, + 0.856, + 0.376 + ], + "angle": 0, + "content": "Figure 3. T-SNE result map after training" + }, + { + "type": "text", + "bbox": [ + 0.514, + 0.382, + 0.928, + 0.535 + ], + "angle": 0, + "content": "From the T-SNE results, it is evident that after training, the data points form distinct cluster-like distributions in the two-dimensional space, indicating that the model has successfully learned the feature differences between different classes. As observed in the visualization, the two categories (represented in blue and red) are well separated, suggesting that the model has developed strong discriminative capabilities in the high-dimensional feature space. The presence of a clear boundary between the classes demonstrates that the model effectively extracts distinguishing features without causing sample overlap, thereby validating its effectiveness." + }, + { + "type": "text", + "bbox": [ + 0.514, + 0.542, + 0.928, + 0.667 + ], + "angle": 0, + "content": "Furthermore, the overall data distribution demonstrates that the T-SNE dimensionality reduction retains intra-class compactness while ensuring inter-class separability. The blue and red data points are well-clustered in distinct regions without significant overlap, indicating that the model effectively distinguishes between different categories in the feature space. Even when dealing with an imbalanced dataset, the model successfully learns the distribution patterns of the minority class." 
+ }, + { + "type": "text", + "bbox": [ + 0.514, + 0.673, + 0.928, + 0.853 + ], + "angle": 0, + "content": "However, while the T-SNE results illustrate a clear class separation, further quantitative evaluation is necessary to assess the robustness of the classification boundaries. For instance, if significant distribution shifts occur in certain test data samples, it may indicate that the model is still susceptible to overfitting. Additionally, since T-SNE is a nonlinear dimensionality reduction method, it may exaggerate the separation between classes, meaning that the actual decision boundaries in the high-dimensional space may not be as well-defined as they appear in the visualization. Therefore, a comprehensive evaluation incorporating classification metrics such as Precision, Recall, and AUC is essential to fully validate the model's generalization performance." + } + ], + [ + { + "type": "title", + "bbox": [ + 0.216, + 0.069, + 0.347, + 0.082 + ], + "angle": 0, + "content": "IV. CONCLUSION" + }, + { + "type": "text", + "bbox": [ + 0.067, + 0.088, + 0.487, + 0.283 + ], + "angle": 0, + "content": "This study proposes an imbalanced data classification method based on deep probabilistic graphical models (DPGMs) and validates its effectiveness through experiments on a credit card fraud detection dataset. The experimental results demonstrate that the proposed method outperforms traditional oversampling techniques, ensemble learning approaches, and attention-based models in key metrics such as AUC and F1-score, confirming the effectiveness of probabilistic modeling in handling imbalanced classification tasks. By integrating variational inference, class-weight adjustment, and adversarial learning mechanisms, our model more accurately captures the feature distribution of the minority class, enhancing the classifier's discriminative ability while mitigating the overfitting issues commonly observed in traditional methods." 
+ }, + { + "type": "text", + "bbox": [ + 0.067, + 0.288, + 0.487, + 0.566 + ], + "angle": 0, + "content": "Despite the promising performance of our approach in imbalanced data classification, several aspects warrant further improvement. For instance, in cases of extreme imbalance, the minority class samples may still provide insufficient information, potentially limiting the model's generalization capability. Additionally, deep probabilistic graphical models involve high computational complexity, requiring extensive sampling and variational inference steps during training, which may impact deployment efficiency. Therefore, future research could focus on optimizing the computational efficiency of probabilistic modeling to enhance the model's adaptability across different data distributions. Several directions can be explored in future research. More efficient Bayesian optimization methods can be investigated to reduce the computational cost of DPGMs, making them applicable to larger-scale imbalanced datasets. Furthermore, in practical applications, federated learning frameworks can be incorporated to enable cross-institutional model training while preserving data privacy, thereby enhancing the applicability of imbalanced classification methods in real-world scenarios." + }, + { + "type": "title", + "bbox": [ + 0.233, + 0.575, + 0.321, + 0.588 + ], + "angle": 0, + "content": "REFERENCES" + }, + { + "type": "ref_text", + "bbox": [ + 0.07, + 0.593, + 0.486, + 0.64 + ], + "angle": 0, + "content": "[1] Y. Cheng, Z. Xu, Y. Chen, Y. Wang, Z. Lin and J. Liu, \"A Deep Learning Framework Integrating CNN and BiLSTM for Financial Systemic Risk Analysis and Prediction,\" arXiv preprint arXiv:2502.06847, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.071, + 0.642, + 0.485, + 0.677 + ], + "angle": 0, + "content": "[2] J. Liu, \"Multimodal Data-Driven Factor Models for Stock Market Forecasting,\" Journal of Computer Technology and Software, vol. 4, no. 
2, 2025, https://doi.org/10.5281/zenodo.14984969." + }, + { + "type": "ref_text", + "bbox": [ + 0.071, + 0.679, + 0.485, + 0.725 + ], + "angle": 0, + "content": "[3] Y. Deng, \"A Hybrid Network Congestion Prediction Method Integrating Association Rules and LSTM for Enhanced Spatiotemporal Forecasting,\" Transactions on Computational and Scientific Methods, vol. 5, no. 2, 2025, https://doi.org/10.5281/zenodo.14912727." + }, + { + "type": "ref_text", + "bbox": [ + 0.071, + 0.728, + 0.486, + 0.773 + ], + "angle": 0, + "content": "[4] P. Feng, \"Hybrid BiLSTM-Transformer Model for Identifying Fraudulent Transactions in Financial Systems,\" Journal of Computer Science and Software Applications, vol. 5, no. 3, 2025, https://doi.org/10.5281/zenodo.14985026." + }, + { + "type": "ref_text", + "bbox": [ + 0.071, + 0.776, + 0.485, + 0.812 + ], + "angle": 0, + "content": "[5] X. Du, \"Optimized Convolutional Neural Network for Intelligent Financial Statement Anomaly Detection,\" Journal of Computer Technology and Software, vol. 3, no. 9, pp. 11-15, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.071, + 0.814, + 0.487, + 0.849 + ], + "angle": 0, + "content": "[6] S. Moolchandani, \"Advancing Credit Risk Management: Embracing Probabilistic Graphical Models in Banking,\" International Journal of Science and Research (IJSR), vol. 13, no. 6, pp. 74-80, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.071, + 0.851, + 0.485, + 0.898 + ], + "angle": 0, + "content": "[7] S. Arya, T. Rahman and V. Gogate, \"Learning to Solve the Constrained Most Probable Explanation Task in Probabilistic Graphical Models,\" Proceedings of the 2024 International Conference on Artificial Intelligence and Statistics (AISTATS), PMLR, pp. 2791-2799, 2024." + }, + { + "type": "list", + "bbox": [ + 0.07, + 0.593, + 0.487, + 0.898 + ], + "angle": 0, + "content": null + }, + { + "type": "ref_text", + "bbox": [ + 0.513, + 0.067, + 0.928, + 0.102 + ], + "angle": 0, + "content": "[8] J. Hu, T. An, Z. 
Yu, J. Du and Y. Luo, \"Contrastive Learning for Cold Start Recommendation with Adaptive Feature Fusion,\" arXiv preprint arXiv:2502.03664, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.514, + 0.104, + 0.929, + 0.14 + ], + "angle": 0, + "content": "[9] J. Zhan, \"Elastic Scheduling of Micro-Modules in Edge Computing Based on LSTM Prediction,\" Journal of Computer Technology and Software, vol. 4, no. 2, 2025, https://doi.org/10.5281/zenodo.14984949." + }, + { + "type": "ref_text", + "bbox": [ + 0.513, + 0.142, + 0.928, + 0.188 + ], + "angle": 0, + "content": "[10] Q. Sun and S. Duan, \"User Intent Prediction and Response in Human-Computer Interaction via BiLSTM,\" Journal of Computer Science and Software Applications, vol. 5, no. 3, 2025, https://doi.org/10.5281/zenodo.14985042." + }, + { + "type": "ref_text", + "bbox": [ + 0.514, + 0.19, + 0.928, + 0.226 + ], + "angle": 0, + "content": "[11] Y. Wang, \"Time-Series Premium Risk Prediction via Bidirectional Transformer,\" Transactions on Computational and Scientific Methods, vol. 5, no. 2, 2025, https://doi.org/10.5281/zenodo.14955913." + }, + { + "type": "ref_text", + "bbox": [ + 0.514, + 0.228, + 0.928, + 0.274 + ], + "angle": 0, + "content": "[12] T. Zhou, Z. Xu and J. Du, \"Efficient Market Signal Prediction for Blockchain HFT with Temporal Convolutional Networks,\" Transactions on Computational and Scientific Methods, vol. 5, no. 2, 2025, https://doi.org/10.5281/zenodo.14912719." + }, + { + "type": "ref_text", + "bbox": [ + 0.514, + 0.277, + 0.929, + 0.323 + ], + "angle": 0, + "content": "[13] X. Wang, \"Data Mining Framework Leveraging Stable Diffusion: A Unified Approach for Classification and Anomaly Detection,\" Journal of Computer Technology and Software, vol. 4, no. 1, 2025, https://doi.org/10.5281/zenodo.14843181." + }, + { + "type": "ref_text", + "bbox": [ + 0.514, + 0.326, + 0.928, + 0.372 + ], + "angle": 0, + "content": "[14] X. 
Sun, \"Dynamic Distributed Scheduling for Data Stream Computing: Balancing Task Delay and Load Efficiency\", Journal of Computer Technology and Software, vol. 4, no. 1, 2025, https://doi.org/10.5281/zenodo.14785261." + }, + { + "type": "ref_text", + "bbox": [ + 0.514, + 0.374, + 0.928, + 0.432 + ], + "angle": 0, + "content": "[15] X. Yan, J. Du, L. Wang, Y. Liang, J. Hu and B. Wang, \"The Synergistic Role of Deep Learning and Neural Architecture Search in Advancing Artificial Intelligence,\" Proceedings of the 2024 International Conference on Electronics and Devices, Computational Science (ICEDCS), pp. 452-456, Sep. 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.514, + 0.434, + 0.928, + 0.469 + ], + "angle": 0, + "content": "[16] P. Li, \"Improved Transformer for Cross-Domain Knowledge Extraction with Feature Alignment,\" Journal of Computer Science and Software Applications, vol. 5, no. 2, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.514, + 0.471, + 0.929, + 0.516 + ], + "angle": 0, + "content": "[17] S. Wang, R. Zhang and X. Shi, \"Generative UI Design with Diffusion Models: Exploring Automated Interface Creation and Human-Computer Interaction,\" Transactions on Computational and Scientific Methods, vol. 5, no. 3, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.514, + 0.519, + 0.928, + 0.555 + ], + "angle": 0, + "content": "[18] J. Gao, S. Lyu, G. Liu, B. Zhu, H. Zheng and X. Liao, \"A Hybrid Model for Few-Shot Text Classification Using Transfer and Meta-Learning,\" arXiv preprint arXiv:2502.09086, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.514, + 0.557, + 0.928, + 0.604 + ], + "angle": 0, + "content": "[19] Y. Yao, \"Time-Series Nested Reinforcement Learning for Dynamic Risk Control in Nonlinear Financial Markets,\" Transactions on Computational and Scientific Methods, vol. 5, no. 1, 2025, https://doi.org/10.5281/zenodo.14677117." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.514, + 0.606, + 0.928, + 0.652 + ], + "angle": 0, + "content": "[20] I. Goodfellow, J. Pouget-Abadie, M. Mirza, B. Xu, D. Warde-Farley, S. Ozair, A. Courville and Y. Bengio, \"Generative Adversarial Nets,\" Proceedings of the 27th Advances in Neural Information Processing Systems (NeurIPS), pp. 1–9, 2014." + }, + { + "type": "ref_text", + "bbox": [ + 0.514, + 0.654, + 0.928, + 0.701 + ], + "angle": 0, + "content": "[21] H. He and Y. Bai, \"ADASYN: Adaptive Synthetic Sampling Approach for Imbalanced Learning,\" Proceedings of the IEEE International Joint Conference on Neural Networks (IEEE World Congress on Computational Intelligence), pp. 1322-1328, 2008." + }, + { + "type": "ref_text", + "bbox": [ + 0.514, + 0.703, + 0.929, + 0.739 + ], + "angle": 0, + "content": "[22] N. V. Chawla, K. W. Bowyer, L. O. Hall and W. P. Kegelmeyer, \"SMOTE: Synthetic Minority Over-Sampling Technique,\" Journal of Artificial Intelligence Research, vol. 16, pp. 321-357, 2002." + }, + { + "type": "ref_text", + "bbox": [ + 0.514, + 0.741, + 0.928, + 0.765 + ], + "angle": 0, + "content": "[23] A. Liaw and M. Wiener, \"Classification and Regression by randomForest,\" R News, vol. 2, no. 3, pp. 18-22, 2002." + }, + { + "type": "ref_text", + "bbox": [ + 0.514, + 0.766, + 0.928, + 0.801 + ], + "angle": 0, + "content": "[24] T. Chen and C. Guestrin, \"XGBoost: A Scalable Tree Boosting System,\" Proceedings of the 22nd ACM SIGKDD International Conference on Knowledge Discovery and Data Mining, pp. 785-794, 2016." + }, + { + "type": "ref_text", + "bbox": [ + 0.514, + 0.804, + 0.928, + 0.839 + ], + "angle": 0, + "content": "[25] Y. Zhou and R. C. Paffenroth, \"Self-Attention Anomaly Detection,\" Proceedings of the 25th ACM SIGKDD International Conference on Knowledge Discovery and Data Mining, pp. 2774-2782, 2019." + }, + { + "type": "ref_text", + "bbox": [ + 0.514, + 0.841, + 0.928, + 0.898 + ], + "angle": 0, + "content": "[26] Z. Yang, D. Yang, C. 
Dyer, X. He, A. Smola and E. Hovy, \"Hierarchical Attention Networks for Document Classification,\" Proceedings of the 2016 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, pp. 1480-1489, 2016." + }, + { + "type": "list", + "bbox": [ + 0.513, + 0.067, + 0.929, + 0.898 + ], + "angle": 0, + "content": null + } + ] +] \ No newline at end of file diff --git a/data/2025/2504_05xxx/2504.05758/607ad388-a3c2-41a7-ba56-f696cce741af_origin.pdf b/data/2025/2504_05xxx/2504.05758/607ad388-a3c2-41a7-ba56-f696cce741af_origin.pdf new file mode 100644 index 0000000000000000000000000000000000000000..c8af2367b4e9facaee2e19d0c50abc538013224c --- /dev/null +++ b/data/2025/2504_05xxx/2504.05758/607ad388-a3c2-41a7-ba56-f696cce741af_origin.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:47df69e5f480452f7109af4923eed045d1c2782fbab3f638fc73bf575a2ff701 +size 513265 diff --git a/data/2025/2504_05xxx/2504.05758/full.md b/data/2025/2504_05xxx/2504.05758/full.md new file mode 100644 index 0000000000000000000000000000000000000000..78ebc8820b8f71fbdba8beccb01faf993af83724 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05758/full.md @@ -0,0 +1,173 @@ +# Addressing Class Imbalance with Probabilistic Graphical Models and Variational Inference + +Yujia Lou +University of Rochester +Rochester, USA + +Jie Liu +University of Minnesota +Minneapolis, USA + +Yuan Sheng Northeastern University Seattle, USA + +Jiawei Wang +University of California Los Angeles, USA + +Yiwei Zhang Cornell University Ithaca, USA + +Yaokun Ren* Northeastern University Seattle, USA + +Abstract-This study proposes a method for imbalanced data classification based on deep probabilistic graphical models (DPGMs) to solve the problem that traditional methods have insufficient learning ability for minority class samples. 
To address the classification bias caused by class imbalance, we introduce variational inference optimization probability modeling, which enables the model to adaptively adjust the representation ability of minority classes and combines the class-aware weight adjustment strategy to enhance the classifier's sensitivity to minority classes. In addition, we combine the adversarial learning mechanism to generate minority class samples in the latent space so that the model can better characterize the category boundary in the high-dimensional feature space. The experiment is evaluated on the Kaggle "Credit Card Fraud Detection" dataset and compared with a variety of advanced imbalanced classification methods (such as GAN-based sampling, BRF, XGBoost-Cost Sensitive, SAAD, HAN). The results show that the method in this study has achieved the best performance in AUC, Precision, Recall and F1-score indicators, effectively improving the recognition rate of minority classes and reducing the false alarm rate. This method can be widely used in imbalanced classification tasks such as financial fraud detection, medical diagnosis, and anomaly detection, providing a new solution for related research. + +Keywords-Deep probabilistic graphical models; Imbalanced data classification; Variational inference; Adversarial learning + +# I. INTRODUCTION + +In modern data analysis and machine learning research, the issue of data imbalance has emerged as a critical challenge affecting classifier performance. Many real-world classification tasks exhibit highly skewed class distributions, such as rare disease detection in market risk analysis and prediction [1], financial fraud detection [2-4], and cybersecurity anomaly [5] detection. In these scenarios, the number of minority class samples is significantly lower than that of the majority class. 
Traditional machine learning models often prioritize optimizing overall classification accuracy, leading to a substantially reduced recognition rate for minority class instances. Existing approaches primarily include data-level resampling strategies, algorithm-level cost-sensitive learning, and adaptive training strategies in deep learning. However, these methods still face several challenges in practical applications, such as potential noise introduction in resampling, the need for precise loss + +weight tuning in cost-sensitive learning, and the limited generalization ability of deep learning models on imbalanced data [6]. Consequently, designing more robust and generalizable classification methods to enhance the performance of imbalanced data classification remains a core research problem. + +In recent years, deep probabilistic graphical models (DPGMs) have gained significant attention in machine learning and pattern recognition due to their powerful representation capabilities and uncertainty modeling. Probabilistic graphical models integrate probabilistic statistical theory with graph-based methods, effectively capturing complex dependencies among variables and modeling data uncertainty [7]. Compared to conventional deep neural networks, probabilistic graphical models offer notable advantages in small-sample learning, data sparsity, and uncertainty reasoning. The emergence of Bayesian deep learning, variational inference, and graph neural networks has further strengthened the applicability of DPGMs in addressing data imbalance challenges [8]. By incorporating probabilistic priors and posterior distributions, these models can more accurately characterize minority class data while effectively accounting for uncertainty in decision-making [9], thereby enhancing the classifier's sensitivity to minority class instances. 
Given this, exploring how to leverage the strengths of deep probabilistic graphical models to develop a more robust imbalanced data classification framework holds both theoretical significance and practical potential. + +The purpose of this study is to investigate the effectiveness of deep probabilistic graphical models (DPGMs) in imbalanced data classification, introducing a novel strategy that emphasizes improved performance on minority class samples. Unlike heuristic methods, this approach leverages the generative capabilities of DPGMs through adaptive probabilistic modeling and structural learning, capturing richer representations of underrepresented samples. Variational inference and Bayesian optimization further refine model parameters, enhancing classification robustness while expanding theoretical insights into DPGMs. Beyond its methodological contributions, the proposed model has notable practical value. In human-computer interaction and the financial sector, user intent prediction methods [10] and time-series risk prediction strategies [11] further underscore how DPGMs can adapt to + +diverse data structures and real-time processing requirements. Moreover, the efficient market signal detection approach proposed by Zhou et al. [12] highlights the role of advanced neural architectures in combination with DPGMs for continuous data streams. These complementary research directions illustrate the wide-ranging applicability and potential extensions of deep probabilistic models in various industries and research fields. By integrating probabilistic inference and deep learning, this work not only addresses the challenges of imbalanced classification but also broadens the application of deep probabilistic approaches, ultimately enriching the machine learning toolkit for various critical domains. + +# II. METHOD + +Suppose dataset $D = \{(x_{i},y_{i})\}_{i = 1}^{N}$ , where $x_{i}\in R^{d}$ represents input samples and $y_{i}\in \{0,1\}$ represents category labels. 
Assuming that the ratio of positive and negative samples is seriously unbalanced, that is, $|\{yi = 1\} | < < |\{y_i = 0\} |$ , traditional deep learning methods tend to favor the majority class when optimizing the loss function. Therefore, we introduce deep probabilistic graphical models (DPGMs). By constructing a joint probability distribution and incorporating variational inference techniques inspired by Wang [13], the proposed model adaptively enhances the representation capability for minority class samples, leading to improved classification performance. This approach effectively mitigates the challenges posed by class imbalance, thereby ensuring that minority class samples are accurately and adequately represented within the learned feature space. Additionally, leveraging dynamic distributed scheduling methodologies as discussed by Sun [14] enables efficient handling of data streams, optimizing both task delays and load balancing. Such strategies significantly contribute to maintaining computational efficiency and enhancing the real-time responsiveness of the system. Furthermore, the synergistic integration of deep learning methods and neural architecture search techniques outlined by Yan et al. [15] further refines the adaptive representational adjustments, ensuring the robustness, accuracy, and generalization capabilities of the proposed classification framework. The architecture of the probabilistic graphical model is shown in Figure 1. + +First, we define a hidden variable $z$ to model the potential representation of the input data $x$ , and use the Bayesian generation model to describe the data generation process: + +$$ +p (x, y, z) = p (y \mid z) p (z \mid x) p (x) +$$ + +Among them, $p(y \mid z)$ represents the posterior distribution of the classifier for the latent variable, and $p(z \mid x)$ represents the prior distribution of the latent variable. 
Based on this, our goal is to optimize the model parameters by maximizing the marginal log-likelihood: + +$$ +\log (y \mid x) = \log \int p (y \mid z) p (z \mid x) d z +$$ + +![](images/c8945917346c994832f059f7db5489e15026b3e9e6b1a93768160bb4b7790869.jpg) +Figure 1. The architecture of the probabilistic graphical model + +However, this integral is difficult to compute directly, so we use variational inference to approximate the solution. Define a variational distribution $q(z|x)$ to approximate $p(z|x)$ , and optimize the model through the evidence lower bound (ELBO): + +$$ +\log p (y \mid x) \geq E _ {q (z \mid x)} [ \log p (y \mid z) ] - D _ {K L} (q (z \mid x) \| p (z)) +$$ + +Among them, $D_{KL}(\cdot \| \cdot)$ represents the Kullback-Leibler divergence, which is used to measure the gap between the approximate distribution and the true posterior distribution. In order to further optimize the classification performance of minority classes, we introduce category-aware variational inference and explicitly enhance the weight of minority class samples in the loss function: + +$$ +L = \sum_ {i = 1} ^ {N} w (y _ {i}) \left[ E _ {q (z | x _ {i})} [ \log p (y _ {i} \mid z) ] - \right. +$$ + +Among them, $w(y_{i})$ is the category weight coefficient, and a higher weight is set for minority class samples, for example: + +$$ +w \left(y _ {i}\right) = \frac {N _ {\text {m a j o r}}}{N _ {\text {m i n o r}}} +$$ + +Where A and B represent the number of samples in the majority class and the minority class, respectively. 
+ +In the specific implementation, we use variational autoencoder (VAE) as the probability generation model, so that $p(z \mid x)$ obeys the normal distribution: + +$$ +q (z \mid x) = N (\mu (x), \sigma^ {2} (x)) +$$ + +And optimize it by reparameterization technique: + +$$ +z = \mu (x) + \sigma (x) \cdot \varepsilon , \quad \varepsilon \sim \mathrm {N} (0, \mathrm {I}) +$$ + +In this way, the model can learn A and B through the neural network to obtain a more stable gradient. In addition, building + +on previous adversarial learning frameworks [16] and generative design concepts [17], we introduce an adversarial learning mechanism to optimize the category distribution. This mechanism strengthens the model's ability to differentiate minority class samples by ensuring that generated data more closely matches real distributions. Specifically, a discriminator is constructed to distinguish the distribution of generated minority class samples from authentic instances, ensuring closer alignment with observed data. Furthermore, incorporating few-shot learning strategies [18] and dynamic adaptation techniques [19] enhances the model's resilience in limited-data conditions. The optimization goal of the discriminator is as follows: + +$$ +\min _ {G} \max _ {D} E _ {x \sim p _ {\text {d a t a}} (x)} [ \log D (z) ] + +$$ + +$$ +E _ {x \sim q _ {(z | x)}} [ \log (1 - D (z)) ] +$$ + +Through this adversarial learning method, the model can capture the characteristics of minority classes more accurately and avoid the overfitting problem of minority class samples. + +In summary, this study combines deep probabilistic graph models, variational reasoning, and adversarial learning methods to optimize imbalanced data classification tasks. + +# III. EXPERIMENT + +# A. Dataset + +This study employs the Kaggle "Credit Card Fraud Detection" dataset, consisting of 284,807 credit card transactions from a European institution. 
Of these transactions, 492 are labeled as fraudulent, indicating a highly imbalanced class distribution (approximately 1:577). Each record contains 30 features, including 28 anonymized components derived from Principal Component Analysis (PCA), along with transaction time and amount. Personally identifiable information has been removed, leaving only numerical features, which were preprocessed through normalization, outlier detection, and data augmentation. + +Given the severe class imbalance, direct application of conventional classification models often leads to bias toward the majority class, compromising fraud detection. To address this challenge, we employed various sampling strategies, including under-sampling, over-sampling, and the Synthetic Minority Over-sampling Technique (SMOTE), to generate synthetic samples for the minority class and improve representation. We also evaluated the impact of different sampling methods on model stability and performance. The dataset was split into $70\%$ training, $15\%$ validation, and $15\%$ test sets. Evaluation metrics included Precision, Recall, F1-score, and the Area Under the Receiver Operating Characteristic Curve (AUC-ROC). Comparative experiments with different data augmentation techniques demonstrated that integrating probabilistic modeling with these strategies substantially enhances fraud detection and reduces false positives, thereby improving the model's reliability in practical applications. + +# B. Experiment Result + +This study primarily compares deep probabilistic graphical models (DPGMs) with several advanced imbalanced data classification methods to validate their effectiveness. First, we select the generative adversarial network (GAN)-based methods, such as WGANGP-SMOTE and ADASYN-GAN, which leverage GANs to synthesize minority class samples and mitigate data imbalance. 
Second, we evaluate class-adaptive ensemble learning methods, including Balanced Random Forest (BRF) and XGBoost-Cost Sensitive, which enhance minority class learning by adjusting sampling strategies or modifying loss functions. Additionally, we compare attention-based imbalanced classification methods, such as Self-Attention Anomaly Detection (SAAD) and Hierarchical Attention Networks (HAN), which have demonstrated strong anomaly detection capabilities in credit card fraud detection and similar tasks. Through these comparative experiments, we aim to comprehensively assess the advantages of deep probabilistic graphical models in minority class representation learning, generalization ability, and classification performance. + +Table 1. Integration Testing 1 + +
ModelAUCPrecisionRecallF1-Score
GAN [20]0.8420.7160.6540.684
ADASYN [21]0.8560.7290.6680.697
SMOTE [22]0.8710.7420.6830.711
BRF [23]0.8890.7640.7210.742
XGBOOST-Cost [24]0.9030.7790.7350.757
SAAD [25]0.9150.7930.7510.771
HAN [26]0.9270.8060.7680.786
Ours0.9410.8220.7850.803
+ +Our proposed deep probabilistic graphical model (DPGM) outperforms all compared methods on every evaluation metric, demonstrating superior generalization in imbalanced classification. With an AUC of 0.941, it clearly surpasses traditional oversampling (e.g., SMOTE, ADASYN) and ensemble methods (e.g., BRF, XGBoost-Cost Sensitive). Importantly, it achieves a Recall of 0.785 while maintaining a Precision of 0.822, reflecting its effectiveness in detecting minority class samples without overly biasing the model. + +DPGMs model the latent distribution of minority samples more effectively than conventional oversampling, thereby reducing overfitting. Compared to attention-based methods (e.g., SAAD, HAN), our model delivers a higher F1-score (0.803 versus 0.786), illustrating the benefits of uncertainty-aware probabilistic modeling. Overall, these results confirm that combining deep probabilistic modeling with variational inference optimizes class distribution and enhances minority class discrimination, offering a robust solution for imbalanced data classification. Figure 2 presents the corresponding loss function trajectory. + +![](images/e739f93a7fbde9cf2a99b06d3897132dc8dbed0714b55865b79655a88af9e655.jpg) +Figure 2. Loss function drop graph + +From the loss function decline curve, both the training loss (Train) and test loss (Test) exhibit a clear downward trend during training iterations. This indicates that the model continuously learns features and optimizes parameters to effectively reduce errors. In the initial phase of training (between 5,000 and 25,000 iterations), the loss decreases at the fastest rate, suggesting that the model rapidly learns data representations and significantly improves classification performance. However, as the number of iterations increases, the rate of loss reduction gradually slows down and stabilizes after approximately 125,000 iterations. 
This trend implies that the model is approaching convergence, where further optimization yields diminishing returns. + +A comparison of the training and test loss curves reveals that the test loss consistently remains lower than the training loss, and both curves follow a similar trajectory. This observation suggests that the model demonstrates good generalization ability without exhibiting significant overfitting. If the training loss were substantially lower than the test loss, it would indicate that the model performs well on training data but struggles to generalize to unseen data. However, the current loss curves do not display such a pattern, implying that the applied regularization strategies and optimization methods effectively mitigate overfitting. Furthermore, the test loss decreases at a rate similar to that of the training loss in the initial stages, further validating the model's stable learning process. Overall, these experimental results confirm that the model successfully optimizes the loss function during training, leading to a substantial reduction in both training and test errors ultimately reaching a relatively low level. This outcome suggests that the chosen training strategy, hyperparameter configuration, and optimization techniques are effective, allowing the model to learn the data distribution efficiently while maintaining strong generalization performance. Additionally, the stabilization of the loss curves indicates that the training process has effectively converged, suggesting that training can be halted or fine-tuned further to ensure optimal performance on the test set. Finally, this paper also gives the T-SNE results after training, as shown in Figure 3. + +![](images/6ad69f33204c10e92da4741df3cc69002914239055daa6956253b03c994af45e.jpg) +Figure 3. 
T-SNE result map after training + +From the T-SNE results, it is evident that after training, the data points form distinct cluster-like distributions in the two-dimensional space, indicating that the model has successfully learned the feature differences between different classes. As observed in the visualization, the two categories (represented in blue and red) are well separated, suggesting that the model has developed strong discriminative capabilities in the high-dimensional feature space. The presence of a clear boundary between the classes demonstrates that the model effectively extracts distinguishing features without causing sample overlap, thereby validating its effectiveness. + +Furthermore, the overall data distribution demonstrates that the T-SNE dimensionality reduction retains intra-class compactness while ensuring inter-class separability. The blue and red data points are well-clustered in distinct regions without significant overlap, indicating that the model effectively distinguishes between different categories in the feature space. Even when dealing with an imbalanced dataset, the model successfully learns the distribution patterns of the minority class. + +However, while the T-SNE results illustrate a clear class separation, further quantitative evaluation is necessary to assess the robustness of the classification boundaries. For instance, if significant distribution shifts occur in certain test data samples, it may indicate that the model is still susceptible to overfitting. Additionally, since T-SNE is a nonlinear dimensionality reduction method, it may exaggerate the separation between classes, meaning that the actual decision boundaries in the high-dimensional space may not be as well-defined as they appear in the visualization. Therefore, a comprehensive evaluation incorporating classification metrics such as Precision, Recall, and AUC is essential to fully validate the model's generalization performance. + +# IV. 
CONCLUSION + +This study proposes an imbalanced data classification method based on deep probabilistic graphical models (DPGMs) and validates its effectiveness through experiments on a credit card fraud detection dataset. The experimental results demonstrate that the proposed method outperforms traditional oversampling techniques, ensemble learning approaches, and attention-based models in key metrics such as AUC and F1-score, confirming the effectiveness of probabilistic modeling in handling imbalanced classification tasks. By integrating variational inference, class-weight adjustment, and adversarial learning mechanisms, our model more accurately captures the feature distribution of the minority class, enhancing the classifier's discriminative ability while mitigating the overfitting issues commonly observed in traditional methods. + +Despite the promising performance of our approach in imbalanced data classification, several aspects warrant further improvement. For instance, in cases of extreme imbalance, the minority class samples may still provide insufficient information, potentially limiting the model's generalization capability. Additionally, deep probabilistic graphical models involve high computational complexity, requiring extensive sampling and variational inference steps during training, which may impact deployment efficiency. Therefore, future research could focus on optimizing the computational efficiency of probabilistic modeling to enhance the model's adaptability across different data distributions. Several directions can be explored in future research. More efficient Bayesian optimization methods can be investigated to reduce the computational cost of DPGMs, making them applicable to larger-scale imbalanced datasets. 
Furthermore, in practical applications, federated learning frameworks can be incorporated to enable cross-institutional model training while preserving data privacy, thereby enhancing the applicability of imbalanced classification methods in real-world scenarios. + +# REFERENCES + +[1] Y. Cheng, Z. Xu, Y. Chen, Y. Wang, Z. Lin and J. Liu, "A Deep Learning Framework Integrating CNN and BiLSTM for Financial Systemic Risk Analysis and Prediction," arXiv preprint arXiv:2502.06847, 2025. +[2] J. Liu, "Multimodal Data-Driven Factor Models for Stock Market Forecasting," Journal of Computer Technology and Software, vol. 4, no. 2, 2025, https://doi.org/10.5281/zenodo.14984969. +[3] Y. Deng, "A Hybrid Network Congestion Prediction Method Integrating Association Rules and LSTM for Enhanced Spatiotemporal Forecasting," Transactions on Computational and Scientific Methods, vol. 5, no. 2, 2025, https://doi.org/10.5281/zenodo.14912727. +[4] P. Feng, "Hybrid BiLSTM-Transformer Model for Identifying Fraudulent Transactions in Financial Systems," Journal of Computer Science and Software Applications, vol. 5, no. 3, 2025, https://doi.org/10.5281/zenodo.14985026. +[5] X. Du, "Optimized Convolutional Neural Network for Intelligent Financial Statement Anomaly Detection," Journal of Computer Technology and Software, vol. 3, no. 9, pp. 11-15, 2024. +[6] S. Moolchandani, "Advancing Credit Risk Management: Embracing Probabilistic Graphical Models in Banking," International Journal of Science and Research (IJSR), vol. 13, no. 6, pp. 74-80, 2024. +[7] S. Arya, T. Rahman and V. Gogate, "Learning to Solve the Constrained Most Probable Explanation Task in Probabilistic Graphical Models," Proceedings of the 2024 International Conference on Artificial Intelligence and Statistics (AISTATS), PMLR, pp. 2791-2799, 2024. + +[8] J. Hu, T. An, Z. Yu, J. Du and Y. Luo, "Contrastive Learning for Cold Start Recommendation with Adaptive Feature Fusion," arXiv preprint arXiv:2502.03664, 2025. +[9] J. 
Zhan, "Elastic Scheduling of Micro-Modules in Edge Computing Based on LSTM Prediction," Journal of Computer Technology and Software, vol. 4, no. 2, 2025, https://doi.org/10.5281/zenodo.14984949. +[10] Q. Sun and S. Duan, "User Intent Prediction and Response in Human-Computer Interaction via BiLSTM," Journal of Computer Science and Software Applications, vol. 5, no. 3, 2025, https://doi.org/10.5281/zenodo.14985042. +[11] Y. Wang, "Time-Series Premium Risk Prediction via Bidirectional Transformer," Transactions on Computational and Scientific Methods, vol. 5, no. 2, 2025, https://doi.org/10.5281/zenodo.14955913. +[12] T. Zhou, Z. Xu and J. Du, "Efficient Market Signal Prediction for Blockchain HFT with Temporal Convolutional Networks," Transactions on Computational and Scientific Methods, vol. 5, no. 2, 2025, https://doi.org/10.5281/zenodo.14912719. +[13] X. Wang, "Data Mining Framework Leveraging Stable Diffusion: A Unified Approach for Classification and Anomaly Detection," Journal of Computer Technology and Software, vol. 4, no. 1, 2025, https://doi.org/10.5281/zenodo.14843181. +[14] X. Sun, "Dynamic Distributed Scheduling for Data Stream Computing: Balancing Task Delay and Load Efficiency", Journal of Computer Technology and Software, vol. 4, no. 1, 2025, https://doi.org/10.5281/zenodo.14785261. +[15] X. Yan, J. Du, L. Wang, Y. Liang, J. Hu and B. Wang, "The Synergistic Role of Deep Learning and Neural Architecture Search in Advancing Artificial Intelligence," Proceedings of the 2024 International Conference on Electronics and Devices, Computational Science (ICEDCS), pp. 452-456, Sep. 2024. +[16] P. Li, "Improved Transformer for Cross-Domain Knowledge Extraction with Feature Alignment," Journal of Computer Science and Software Applications, vol. 5, no. 2, 2024. +[17] S. Wang, R. Zhang and X. 
Shi, "Generative UI Design with Diffusion Models: Exploring Automated Interface Creation and Human-Computer Interaction," Transactions on Computational and Scientific Methods, vol. 5, no. 3, 2025. +[18] J. Gao, S. Lyu, G. Liu, B. Zhu, H. Zheng and X. Liao, "A Hybrid Model for Few-Shot Text Classification Using Transfer and Meta-Learning," arXiv preprint arXiv:2502.09086, 2025. +[19] Y. Yao, "Time-Series Nested Reinforcement Learning for Dynamic Risk Control in Nonlinear Financial Markets," Transactions on Computational and Scientific Methods, vol. 5, no. 1, 2025, https://doi.org/10.5281/zenodo.14677117. +[20] I. Goodfellow, J. Pouget-Abadie, M. Mirza, B. Xu, D. Warde-Farley, S. Ozair, A. Courville and Y. Bengio, "Generative Adversarial Nets," Proceedings of the 27th Advances in Neural Information Processing Systems (NeurIPS), pp. 1–9, 2014. +[21] H. He and Y. Bai, "ADASYN: Adaptive Synthetic Sampling Approach for Imbalanced Learning," Proceedings of the IEEE International Joint Conference on Neural Networks (IEEE World Congress on Computational Intelligence), pp. 1322-1328, 2008. +[22] N. V. Chawla, K. W. Bowyer, L. O. Hall and W. P. Kegelmeyer, "SMOTE: Synthetic Minority Over-Sampling Technique," Journal of Artificial Intelligence Research, vol. 16, pp. 321-357, 2002. +[23] A. Liaw and M. Wiener, "Classification and Regression by randomForest," R News, vol. 2, no. 3, pp. 18-22, 2002. +[24] T. Chen and C. Guestrin, "XGBoost: A Scalable Tree Boosting System," Proceedings of the 22nd ACM SIGKDD International Conference on Knowledge Discovery and Data Mining, pp. 785-794, 2016. +[25] Y. Zhou and R. C. Paffenroth, "Self-Attention Anomaly Detection," Proceedings of the 25th ACM SIGKDD International Conference on Knowledge Discovery and Data Mining, pp. 2774-2782, 2019. +[26] Z. Yang, D. Yang, C. Dyer, X. He, A. Smola and E. 
Hovy, "Hierarchical Attention Networks for Document Classification," Proceedings of the 2016 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, pp. 1480-1489, 2016. \ No newline at end of file diff --git a/data/2025/2504_05xxx/2504.05758/images/1e94108d7e948cbe2234be390b158b637b2839b2600cdc7b10c6f6f9d8c86757.jpg b/data/2025/2504_05xxx/2504.05758/images/1e94108d7e948cbe2234be390b158b637b2839b2600cdc7b10c6f6f9d8c86757.jpg new file mode 100644 index 0000000000000000000000000000000000000000..f192bc71a7e6cb43da98036e291f7b5b76f34407 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05758/images/1e94108d7e948cbe2234be390b158b637b2839b2600cdc7b10c6f6f9d8c86757.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6ea807c68aa3250bcb7c4dba501088616e42e751e60a4c9ff79ec67ae9b2204a +size 4260 diff --git a/data/2025/2504_05xxx/2504.05758/images/3c81cf688d23b9a5c258f1a910d4b6375efaa9b4bf011b786cf38f50312871eb.jpg b/data/2025/2504_05xxx/2504.05758/images/3c81cf688d23b9a5c258f1a910d4b6375efaa9b4bf011b786cf38f50312871eb.jpg new file mode 100644 index 0000000000000000000000000000000000000000..55d366f348ea33e28c3fc749cf319637f2fb769d --- /dev/null +++ b/data/2025/2504_05xxx/2504.05758/images/3c81cf688d23b9a5c258f1a910d4b6375efaa9b4bf011b786cf38f50312871eb.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:18d640f10b62b03b4a8513adc32f7ac07c7b4eda62ee61147cf7adbdb0c7ba92 +size 5216 diff --git a/data/2025/2504_05xxx/2504.05758/images/45c9c4e23af1f7fcd71e4e1f1c42005c9812a59abe82b5be235e2ec375883ea3.jpg b/data/2025/2504_05xxx/2504.05758/images/45c9c4e23af1f7fcd71e4e1f1c42005c9812a59abe82b5be235e2ec375883ea3.jpg new file mode 100644 index 0000000000000000000000000000000000000000..24a85b92fd1314fcde0b7e5ecda4a7d043066c8c --- /dev/null +++ b/data/2025/2504_05xxx/2504.05758/images/45c9c4e23af1f7fcd71e4e1f1c42005c9812a59abe82b5be235e2ec375883ea3.jpg @@ -0,0 +1,3 @@ +version 
https://git-lfs.github.com/spec/v1 +oid sha256:cfbab8ed11a1678a8f825d5ee34a16ac88271ba70c79e1d2097bbf0b8c035cc0 +size 4223 diff --git a/data/2025/2504_05xxx/2504.05758/images/4bf722e0285a33bb56a86df84ca351fde95bf2e681d8d0f451530ce70b8ce5ea.jpg b/data/2025/2504_05xxx/2504.05758/images/4bf722e0285a33bb56a86df84ca351fde95bf2e681d8d0f451530ce70b8ce5ea.jpg new file mode 100644 index 0000000000000000000000000000000000000000..0868947d8508e2a8216c33b7a4d5cab0b661262a --- /dev/null +++ b/data/2025/2504_05xxx/2504.05758/images/4bf722e0285a33bb56a86df84ca351fde95bf2e681d8d0f451530ce70b8ce5ea.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:dd28e2b59de2d57d61351778ed0371cba5681173b428022d3a671abbf49f21ad +size 7883 diff --git a/data/2025/2504_05xxx/2504.05758/images/57a239a94222df9728fff416dc0f582b2ccc3b70ec254c8b69c33076069bada5.jpg b/data/2025/2504_05xxx/2504.05758/images/57a239a94222df9728fff416dc0f582b2ccc3b70ec254c8b69c33076069bada5.jpg new file mode 100644 index 0000000000000000000000000000000000000000..fe6661d9d10b87b5f2a9f7bb9b733837798888c4 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05758/images/57a239a94222df9728fff416dc0f582b2ccc3b70ec254c8b69c33076069bada5.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1f42c52f9f8b544e33112afcdf38b5859bd7e4ef72c7f8c14bec504b30dc6a29 +size 9072 diff --git a/data/2025/2504_05xxx/2504.05758/images/6ad69f33204c10e92da4741df3cc69002914239055daa6956253b03c994af45e.jpg b/data/2025/2504_05xxx/2504.05758/images/6ad69f33204c10e92da4741df3cc69002914239055daa6956253b03c994af45e.jpg new file mode 100644 index 0000000000000000000000000000000000000000..4f7b359aa4e3bfe326dde38aa15ee41ed4f430d8 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05758/images/6ad69f33204c10e92da4741df3cc69002914239055daa6956253b03c994af45e.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f1f56a72efe9e28426cd2d9c6043c83248a6a05a786e9687ace68cc83be50e61 +size 46200 diff --git 
a/data/2025/2504_05xxx/2504.05758/images/93059c0b0200dfbf1341842c23946121be5bcecbebeabde6e3839117f7341335.jpg b/data/2025/2504_05xxx/2504.05758/images/93059c0b0200dfbf1341842c23946121be5bcecbebeabde6e3839117f7341335.jpg new file mode 100644 index 0000000000000000000000000000000000000000..17920edc15e1e0716117bcb60761e50dd693219e --- /dev/null +++ b/data/2025/2504_05xxx/2504.05758/images/93059c0b0200dfbf1341842c23946121be5bcecbebeabde6e3839117f7341335.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:71ebaeae93154720fb4d8027c3e697338a54a845541ceb57c818d8bea29f014b +size 3583 diff --git a/data/2025/2504_05xxx/2504.05758/images/acdfe8a5e054f9435eea5074a1a9c3feb17a8c5ca109e1f7b62d7971197424bb.jpg b/data/2025/2504_05xxx/2504.05758/images/acdfe8a5e054f9435eea5074a1a9c3feb17a8c5ca109e1f7b62d7971197424bb.jpg new file mode 100644 index 0000000000000000000000000000000000000000..1b11ed15feaf5402e90427eea53e64e4ce8e69e2 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05758/images/acdfe8a5e054f9435eea5074a1a9c3feb17a8c5ca109e1f7b62d7971197424bb.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:81889ef35c89581181229e8f9d470a9167b2e338b3a8f40ea353bc5fd6ff13c7 +size 3286 diff --git a/data/2025/2504_05xxx/2504.05758/images/bb202b08b189fda03ed9f8a34755cc4f9f244fe15241e298af9a4ebaca3f9733.jpg b/data/2025/2504_05xxx/2504.05758/images/bb202b08b189fda03ed9f8a34755cc4f9f244fe15241e298af9a4ebaca3f9733.jpg new file mode 100644 index 0000000000000000000000000000000000000000..587144400227bc81dc7911e67d44e8f75832f6a5 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05758/images/bb202b08b189fda03ed9f8a34755cc4f9f244fe15241e298af9a4ebaca3f9733.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a6a5b059796ccc4a4a502a3a440fc3b8b1472d72fbb69e9c7c71a7e05090c741 +size 5795 diff --git a/data/2025/2504_05xxx/2504.05758/images/c8945917346c994832f059f7db5489e15026b3e9e6b1a93768160bb4b7790869.jpg 
b/data/2025/2504_05xxx/2504.05758/images/c8945917346c994832f059f7db5489e15026b3e9e6b1a93768160bb4b7790869.jpg new file mode 100644 index 0000000000000000000000000000000000000000..8b9776912d9034c69aa2c0afbfa2b3ac7bc4802f --- /dev/null +++ b/data/2025/2504_05xxx/2504.05758/images/c8945917346c994832f059f7db5489e15026b3e9e6b1a93768160bb4b7790869.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:19bfa36918913fb79f881e34f4041a3410e05352ce92b99633182d1bd77814c9 +size 22603 diff --git a/data/2025/2504_05xxx/2504.05758/images/e52f8ad9e8d735a3ab74ab8773298cc5f69f2a2f31b5ee1d75b171e7cdd13362.jpg b/data/2025/2504_05xxx/2504.05758/images/e52f8ad9e8d735a3ab74ab8773298cc5f69f2a2f31b5ee1d75b171e7cdd13362.jpg new file mode 100644 index 0000000000000000000000000000000000000000..22669d6231f2262199f6bdd2a00f4e3b5598e20b --- /dev/null +++ b/data/2025/2504_05xxx/2504.05758/images/e52f8ad9e8d735a3ab74ab8773298cc5f69f2a2f31b5ee1d75b171e7cdd13362.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:071612ed2b5b30b5b58e5b211878dd2ef1b011bae939b60ff27aa4ca6344b269 +size 4932 diff --git a/data/2025/2504_05xxx/2504.05758/images/e739f93a7fbde9cf2a99b06d3897132dc8dbed0714b55865b79655a88af9e655.jpg b/data/2025/2504_05xxx/2504.05758/images/e739f93a7fbde9cf2a99b06d3897132dc8dbed0714b55865b79655a88af9e655.jpg new file mode 100644 index 0000000000000000000000000000000000000000..804fedcc6279e66de87f9cfd189f7567668f1970 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05758/images/e739f93a7fbde9cf2a99b06d3897132dc8dbed0714b55865b79655a88af9e655.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:789d1e670f223ddeb323d2df129cd32faa353832e3b0bfaceb6088181c137e6e +size 25386 diff --git a/data/2025/2504_05xxx/2504.05758/images/ffd677e4e9724b33fd97b088d2b56b347e397c7cd6d17964076bb5ea1b746ab4.jpg b/data/2025/2504_05xxx/2504.05758/images/ffd677e4e9724b33fd97b088d2b56b347e397c7cd6d17964076bb5ea1b746ab4.jpg new file mode 100644 index 
0000000000000000000000000000000000000000..56e68db6abd3793afb3dd064558f87db4e16ccd9 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05758/images/ffd677e4e9724b33fd97b088d2b56b347e397c7cd6d17964076bb5ea1b746ab4.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:bf53ba3483f07cca48303891de6035334483ac87d4d3bee71268f47634d21362 +size 47049 diff --git a/data/2025/2504_05xxx/2504.05758/layout.json b/data/2025/2504_05xxx/2504.05758/layout.json new file mode 100644 index 0000000000000000000000000000000000000000..ce344e2548762df88f9b674b351cdac16c6268ef --- /dev/null +++ b/data/2025/2504_05xxx/2504.05758/layout.json @@ -0,0 +1,3399 @@ +{ + "pdf_info": [ + { + "para_blocks": [ + { + "bbox": [ + 80, + 55, + 531, + 109 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 80, + 55, + 531, + 109 + ], + "spans": [ + { + "bbox": [ + 80, + 55, + 531, + 109 + ], + "type": "text", + "content": "Addressing Class Imbalance with Probabilistic Graphical Models and Variational Inference" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 80, + 114, + 170, + 151 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 80, + 114, + 170, + 151 + ], + "spans": [ + { + "bbox": [ + 80, + 114, + 170, + 151 + ], + "type": "text", + "content": "Yujia Lou \nUniversity of Rochester \nRochester, USA" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 260, + 114, + 351, + 151 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 260, + 114, + 351, + 151 + ], + "spans": [ + { + "bbox": [ + 260, + 114, + 351, + 151 + ], + "type": "text", + "content": "Jie Liu \nUniversity of Minnesota \nMinneapolis, USA" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 442, + 114, + 531, + 146 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 442, + 114, + 531, + 146 + ], + "spans": [ + { + "bbox": [ + 442, + 114, + 531, + 146 + ], + "type": "text", + "content": "Yuan Sheng Northeastern University Seattle, USA" + } + ] + } + ], + 
"index": 3 + }, + { + "bbox": [ + 80, + 175, + 168, + 209 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 80, + 175, + 168, + 209 + ], + "spans": [ + { + "bbox": [ + 80, + 175, + 168, + 209 + ], + "type": "text", + "content": "Jiawei Wang \nUniversity of California Los Angeles, USA" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 270, + 175, + 340, + 208 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 270, + 175, + 340, + 208 + ], + "spans": [ + { + "bbox": [ + 270, + 175, + 340, + 208 + ], + "type": "text", + "content": "Yiwei Zhang Cornell University Ithaca, USA" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 442, + 175, + 531, + 209 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 442, + 175, + 531, + 209 + ], + "spans": [ + { + "bbox": [ + 442, + 175, + 531, + 209 + ], + "type": "text", + "content": "Yaokun Ren* Northeastern University Seattle, USA" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 41, + 247, + 299, + 485 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 41, + 247, + 299, + 485 + ], + "spans": [ + { + "bbox": [ + 41, + 247, + 299, + 485 + ], + "type": "text", + "content": "Abstract-This study proposes a method for imbalanced data classification based on deep probabilistic graphical models (DPGMs) to solve the problem that traditional methods have insufficient learning ability for minority class samples. To address the classification bias caused by class imbalance, we introduce variational inference optimization probability modeling, which enables the model to adaptively adjust the representation ability of minority classes and combines the class-aware weight adjustment strategy to enhance the classifier's sensitivity to minority classes. In addition, we combine the adversarial learning mechanism to generate minority class samples in the latent space so that the model can better characterize the category boundary in the high-dimensional feature space. 
The experiment is evaluated on the Kaggle \"Credit Card Fraud Detection\" dataset and compared with a variety of advanced imbalanced classification methods (such as GAN-based sampling, BRF, XGBoost-Cost Sensitive, SAAD, HAN). The results show that the method in this study has achieved the best performance in AUC, Precision, Recall and F1-score indicators, effectively improving the recognition rate of minority classes and reducing the false alarm rate. This method can be widely used in imbalanced classification tasks such as financial fraud detection, medical diagnosis, and anomaly detection, providing a new solution for related research." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 41, + 495, + 293, + 517 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 41, + 495, + 293, + 517 + ], + "spans": [ + { + "bbox": [ + 41, + 495, + 293, + 517 + ], + "type": "text", + "content": "Keywords-Deep probabilistic graphical models; Imbalanced data classification; Variational inference; Adversarial learning" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 129, + 524, + 218, + 534 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 129, + 524, + 218, + 534 + ], + "spans": [ + { + "bbox": [ + 129, + 524, + 218, + 534 + ], + "type": "text", + "content": "I. INTRODUCTION" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 41, + 539, + 298, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 41, + 539, + 298, + 715 + ], + "spans": [ + { + "bbox": [ + 41, + 539, + 298, + 715 + ], + "type": "text", + "content": "In modern data analysis and machine learning research, the issue of data imbalance has emerged as a critical challenge affecting classifier performance. Many real-world classification tasks exhibit highly skewed class distributions, such as rare disease detection in market risk analysis and prediction [1], financial fraud detection [2-4], and cybersecurity anomaly [5] detection. 
In these scenarios, the number of minority class samples is significantly lower than that of the majority class. Traditional machine learning models often prioritize optimizing overall classification accuracy, leading to a substantially reduced recognition rate for minority class instances. Existing approaches primarily include data-level resampling strategies, algorithm-level cost-sensitive learning, and adaptive training strategies in deep learning. However, these methods still face several challenges in practical applications, such as potential noise introduction in resampling, the need for precise loss" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 310, + 247, + 568, + 313 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 247, + 568, + 313 + ], + "spans": [ + { + "bbox": [ + 310, + 247, + 568, + 313 + ], + "type": "text", + "content": "weight tuning in cost-sensitive learning, and the limited generalization ability of deep learning models on imbalanced data [6]. Consequently, designing more robust and generalizable classification methods to enhance the performance of imbalanced data classification remains a core research problem." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 310, + 318, + 568, + 549 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 318, + 568, + 549 + ], + "spans": [ + { + "bbox": [ + 310, + 318, + 568, + 549 + ], + "type": "text", + "content": "In recent years, deep probabilistic graphical models (DPGMs) have gained significant attention in machine learning and pattern recognition due to their powerful representation capabilities and uncertainty modeling. Probabilistic graphical models integrate probabilistic statistical theory with graph-based methods, effectively capturing complex dependencies among variables and modeling data uncertainty [7]. 
Compared to conventional deep neural networks, probabilistic graphical models offer notable advantages in small-sample learning, data sparsity, and uncertainty reasoning. The emergence of Bayesian deep learning, variational inference, and graph neural networks has further strengthened the applicability of DPGMs in addressing data imbalance challenges [8]. By incorporating probabilistic priors and posterior distributions, these models can more accurately characterize minority class data while effectively accounting for uncertainty in decision-making [9], thereby enhancing the classifier's sensitivity to minority class instances. Given this, exploring how to leverage the strengths of deep probabilistic graphical models to develop a more robust imbalanced data classification framework holds both theoretical significance and practical potential." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 310, + 554, + 569, + 719 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 554, + 569, + 719 + ], + "spans": [ + { + "bbox": [ + 310, + 554, + 569, + 719 + ], + "type": "text", + "content": "The purpose of this study is to investigate the effectiveness of deep probabilistic graphical models (DPGMs) in imbalanced data classification, introducing a novel strategy that emphasizes improved performance on minority class samples. Unlike heuristic methods, this approach leverages the generative capabilities of DPGMs through adaptive probabilistic modeling and structural learning, capturing richer representations of underrepresented samples. Variational inference and Bayesian optimization further refine model parameters, enhancing classification robustness while expanding theoretical insights into DPGMs. Beyond its methodological contributions, the proposed model has notable practical value. 
In human-computer interaction and the financial sector, user intent prediction methods [10] and time-series risk prediction strategies [11] further underscore how DPGMs can adapt to" + } + ] + } + ], + "index": 13 + } + ], + "discarded_blocks": [], + "page_size": [ + 612, + 792 + ], + "page_idx": 0 + }, + { + "para_blocks": [ + { + "bbox": [ + 41, + 53, + 298, + 185 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 41, + 53, + 298, + 185 + ], + "spans": [ + { + "bbox": [ + 41, + 53, + 298, + 185 + ], + "type": "text", + "content": "diverse data structures and real-time processing requirements. Moreover, the efficient market signal detection approach proposed by Zhou et al. [12] highlights the role of advanced neural architectures in combination with DPGMs for continuous data streams. These complementary research directions illustrate the wide-ranging applicability and potential extensions of deep probabilistic models in various industries and research fields. By integrating probabilistic inference and deep learning, this work not only addresses the challenges of imbalanced classification but also broadens the application of deep probabilistic approaches, ultimately enriching the machine learning toolkit for various critical domains." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 141, + 193, + 204, + 203 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 141, + 193, + 204, + 203 + ], + "spans": [ + { + "bbox": [ + 141, + 193, + 204, + 203 + ], + "type": "text", + "content": "II. 
METHOD" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 41, + 209, + 298, + 525 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 41, + 209, + 298, + 525 + ], + "spans": [ + { + "bbox": [ + 41, + 209, + 298, + 525 + ], + "type": "text", + "content": "Suppose dataset " + }, + { + "bbox": [ + 41, + 209, + 298, + 525 + ], + "type": "inline_equation", + "content": "D = \\{(x_{i},y_{i})\\}_{i = 1}^{N}" + }, + { + "bbox": [ + 41, + 209, + 298, + 525 + ], + "type": "text", + "content": ", where " + }, + { + "bbox": [ + 41, + 209, + 298, + 525 + ], + "type": "inline_equation", + "content": "x_{i}\\in R^{d}" + }, + { + "bbox": [ + 41, + 209, + 298, + 525 + ], + "type": "text", + "content": " represents input samples and " + }, + { + "bbox": [ + 41, + 209, + 298, + 525 + ], + "type": "inline_equation", + "content": "y_{i}\\in \\{0,1\\}" + }, + { + "bbox": [ + 41, + 209, + 298, + 525 + ], + "type": "text", + "content": " represents category labels. Assuming that the ratio of positive and negative samples is seriously unbalanced, that is, " + }, + { + "bbox": [ + 41, + 209, + 298, + 525 + ], + "type": "inline_equation", + "content": "|\\{yi = 1\\} | < < |\\{y_i = 0\\} |" + }, + { + "bbox": [ + 41, + 209, + 298, + 525 + ], + "type": "text", + "content": ", traditional deep learning methods tend to favor the majority class when optimizing the loss function. Therefore, we introduce deep probabilistic graphical models (DPGMs). By constructing a joint probability distribution and incorporating variational inference techniques inspired by Wang [13], the proposed model adaptively enhances the representation capability for minority class samples, leading to improved classification performance. This approach effectively mitigates the challenges posed by class imbalance, thereby ensuring that minority class samples are accurately and adequately represented within the learned feature space. 
Additionally, leveraging dynamic distributed scheduling methodologies as discussed by Sun [14] enables efficient handling of data streams, optimizing both task delays and load balancing. Such strategies significantly contribute to maintaining computational efficiency and enhancing the real-time responsiveness of the system. Furthermore, the synergistic integration of deep learning methods and neural architecture search techniques outlined by Yan et al. [15] further refines the adaptive representational adjustments, ensuring the robustness, accuracy, and generalization capabilities of the proposed classification framework. The architecture of the probabilistic graphical model is shown in Figure 1." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 41, + 529, + 298, + 564 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 41, + 529, + 298, + 564 + ], + "spans": [ + { + "bbox": [ + 41, + 529, + 298, + 564 + ], + "type": "text", + "content": "First, we define a hidden variable " + }, + { + "bbox": [ + 41, + 529, + 298, + 564 + ], + "type": "inline_equation", + "content": "z" + }, + { + "bbox": [ + 41, + 529, + 298, + 564 + ], + "type": "text", + "content": " to model the potential representation of the input data " + }, + { + "bbox": [ + 41, + 529, + 298, + 564 + ], + "type": "inline_equation", + "content": "x" + }, + { + "bbox": [ + 41, + 529, + 298, + 564 + ], + "type": "text", + "content": ", and use the Bayesian generation model to describe the data generation process:" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 100, + 570, + 253, + 582 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 100, + 570, + 253, + 582 + ], + "spans": [ + { + "bbox": [ + 100, + 570, + 253, + 582 + ], + "type": "interline_equation", + "content": "p (x, y, z) = p (y \\mid z) p (z \\mid x) p (x)", + "image_path": "e52f8ad9e8d735a3ab74ab8773298cc5f69f2a2f31b5ee1d75b171e7cdd13362.jpg" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ 
+ 41, + 590, + 298, + 652 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 41, + 590, + 298, + 652 + ], + "spans": [ + { + "bbox": [ + 41, + 590, + 298, + 652 + ], + "type": "text", + "content": "Among them, " + }, + { + "bbox": [ + 41, + 590, + 298, + 652 + ], + "type": "inline_equation", + "content": "p(y \\mid z)" + }, + { + "bbox": [ + 41, + 590, + 298, + 652 + ], + "type": "text", + "content": " represents the posterior distribution of the classifier for the latent variable, and " + }, + { + "bbox": [ + 41, + 590, + 298, + 652 + ], + "type": "inline_equation", + "content": "p(z \\mid x)" + }, + { + "bbox": [ + 41, + 590, + 298, + 652 + ], + "type": "text", + "content": " represents the prior distribution of the latent variable. Based on this, our goal is to optimize the model parameters by maximizing the marginal log-likelihood:" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 93, + 657, + 259, + 677 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 93, + 657, + 259, + 677 + ], + "spans": [ + { + "bbox": [ + 93, + 657, + 259, + 677 + ], + "type": "interline_equation", + "content": "\\log (y \\mid x) = \\log \\int p (y \\mid z) p (z \\mid x) d z", + "image_path": "bb202b08b189fda03ed9f8a34755cc4f9f244fe15241e298af9a4ebaca3f9733.jpg" + } + ] + } + ], + "index": 6 + }, + { + "type": "image", + "bbox": [ + 334, + 58, + 547, + 225 + ], + "blocks": [ + { + "bbox": [ + 334, + 58, + 547, + 225 + ], + "lines": [ + { + "bbox": [ + 334, + 58, + 547, + 225 + ], + "spans": [ + { + "bbox": [ + 334, + 58, + 547, + 225 + ], + "type": "image", + "image_path": "c8945917346c994832f059f7db5489e15026b3e9e6b1a93768160bb4b7790869.jpg" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 335, + 240, + 558, + 262 + ], + "lines": [ + { + "bbox": [ + 335, + 240, + 558, + 262 + ], + "spans": [ + { + "bbox": [ + 335, + 240, + 558, + 262 + ], + "type": "text", + "content": "Figure 1. 
The architecture of the probabilistic graphical model" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_caption" + } + ], + "index": 7 + }, + { + "bbox": [ + 311, + 268, + 568, + 330 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 311, + 268, + 568, + 330 + ], + "spans": [ + { + "bbox": [ + 311, + 268, + 568, + 330 + ], + "type": "text", + "content": "However, this integral is difficult to compute directly, so we use variational inference to approximate the solution. Define a variational distribution " + }, + { + "bbox": [ + 311, + 268, + 568, + 330 + ], + "type": "inline_equation", + "content": "q(z|x)" + }, + { + "bbox": [ + 311, + 268, + 568, + 330 + ], + "type": "text", + "content": " to approximate " + }, + { + "bbox": [ + 311, + 268, + 568, + 330 + ], + "type": "inline_equation", + "content": "p(z|x)" + }, + { + "bbox": [ + 311, + 268, + 568, + 330 + ], + "type": "text", + "content": ", and optimize the model through the evidence lower bound (ELBO):" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 312, + 338, + 570, + 354 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 312, + 338, + 570, + 354 + ], + "spans": [ + { + "bbox": [ + 312, + 338, + 570, + 354 + ], + "type": "interline_equation", + "content": "\\log p (y \\mid x) \\geq E _ {q (z \\mid x)} [ \\log p (y \\mid z) ] - D _ {K L} (q (z \\mid x) \\| p (z))", + "image_path": "4bf722e0285a33bb56a86df84ca351fde95bf2e681d8d0f451530ce70b8ce5ea.jpg" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 311, + 357, + 568, + 437 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 311, + 357, + 568, + 437 + ], + "spans": [ + { + "bbox": [ + 311, + 357, + 568, + 437 + ], + "type": "text", + "content": "Among them, " + }, + { + "bbox": [ + 311, + 357, + 568, + 437 + ], + "type": "inline_equation", + "content": "D_{KL}(\\cdot \\| \\cdot)" + }, + { + "bbox": [ + 311, + 357, + 568, + 437 + ], + "type": "text", + "content": " 
represents the Kullback-Leibler divergence, which is used to measure the gap between the approximate distribution and the true posterior distribution. In order to further optimize the classification performance of minority classes, we introduce category-aware variational inference and explicitly enhance the weight of minority class samples in the loss function:" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 355, + 445, + 523, + 480 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 355, + 445, + 523, + 480 + ], + "spans": [ + { + "bbox": [ + 355, + 445, + 523, + 480 + ], + "type": "interline_equation", + "content": "L = \\sum_ {i = 1} ^ {N} w (y _ {i}) \\left[ E _ {q (z | x _ {i})} [ \\log p (y _ {i} \\mid z) ] - \\right.", + "image_path": "57a239a94222df9728fff416dc0f582b2ccc3b70ec254c8b69c33076069bada5.jpg" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 311, + 490, + 567, + 517 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 311, + 490, + 567, + 517 + ], + "spans": [ + { + "bbox": [ + 311, + 490, + 567, + 517 + ], + "type": "text", + "content": "Among them, " + }, + { + "bbox": [ + 311, + 490, + 567, + 517 + ], + "type": "inline_equation", + "content": "w(y_{i})" + }, + { + "bbox": [ + 311, + 490, + 567, + 517 + ], + "type": "text", + "content": " is the category weight coefficient, and a higher weight is set for minority class samples, for example:" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 403, + 523, + 477, + 555 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 403, + 523, + 477, + 555 + ], + "spans": [ + { + "bbox": [ + 403, + 523, + 477, + 555 + ], + "type": "interline_equation", + "content": "w \\left(y _ {i}\\right) = \\frac {N _ {\\text {m a j o r}}}{N _ {\\text {m i n o r}}}", + "image_path": "acdfe8a5e054f9435eea5074a1a9c3feb17a8c5ca109e1f7b62d7971197424bb.jpg" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 311, + 562, + 567, + 586 + ], 
+ "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 311, + 562, + 567, + 586 + ], + "spans": [ + { + "bbox": [ + 311, + 562, + 567, + 586 + ], + "type": "text", + "content": "Where A and B represent the number of samples in the majority class and the minority class, respectively." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 311, + 590, + 567, + 626 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 311, + 590, + 567, + 626 + ], + "spans": [ + { + "bbox": [ + 311, + 590, + 567, + 626 + ], + "type": "text", + "content": "In the specific implementation, we use variational autoencoder (VAE) as the probability generation model, so that " + }, + { + "bbox": [ + 311, + 590, + 567, + 626 + ], + "type": "inline_equation", + "content": "p(z \\mid x)" + }, + { + "bbox": [ + 311, + 590, + 567, + 626 + ], + "type": "text", + "content": " obeys the normal distribution:" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 378, + 634, + 502, + 649 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 378, + 634, + 502, + 649 + ], + "spans": [ + { + "bbox": [ + 378, + 634, + 502, + 649 + ], + "type": "interline_equation", + "content": "q (z \\mid x) = N (\\mu (x), \\sigma^ {2} (x))", + "image_path": "1e94108d7e948cbe2234be390b158b637b2839b2600cdc7b10c6f6f9d8c86757.jpg" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 312, + 656, + 510, + 668 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 312, + 656, + 510, + 668 + ], + "spans": [ + { + "bbox": [ + 312, + 656, + 510, + 668 + ], + "type": "text", + "content": "And optimize it by reparameterization technique:" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 364, + 672, + 515, + 685 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 364, + 672, + 515, + 685 + ], + "spans": [ + { + "bbox": [ + 364, + 672, + 515, + 685 + ], + "type": "interline_equation", + "content": "z = \\mu (x) + \\sigma (x) \\cdot 
\\varepsilon , \\quad \\varepsilon \\sim \\mathrm {N} (0, \\mathrm {I})", + "image_path": "45c9c4e23af1f7fcd71e4e1f1c42005c9812a59abe82b5be235e2ec375883ea3.jpg" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 311, + 693, + 567, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 311, + 693, + 567, + 715 + ], + "spans": [ + { + "bbox": [ + 311, + 693, + 567, + 715 + ], + "type": "text", + "content": "In this way, the model can learn A and B through the neural network to obtain a more stable gradient. In addition, building" + } + ] + } + ], + "index": 20 + } + ], + "discarded_blocks": [], + "page_size": [ + 612, + 792 + ], + "page_idx": 1 + }, + { + "para_blocks": [ + { + "bbox": [ + 41, + 53, + 298, + 196 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 41, + 53, + 298, + 196 + ], + "spans": [ + { + "bbox": [ + 41, + 53, + 298, + 196 + ], + "type": "text", + "content": "on previous adversarial learning frameworks [16] and generative design concepts [17], we introduce an adversarial learning mechanism to optimize the category distribution. This mechanism strengthens the model's ability to differentiate minority class samples by ensuring that generated data more closely matches real distributions. Specifically, a discriminator is constructed to distinguish the distribution of generated minority class samples from authentic instances, ensuring closer alignment with observed data. Furthermore, incorporating few-shot learning strategies [18] and dynamic adaptation techniques [19] enhances the model's resilience in limited-data conditions. 
The optimization goal of the discriminator is as follows:" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 96, + 202, + 241, + 222 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 96, + 202, + 241, + 222 + ], + "spans": [ + { + "bbox": [ + 96, + 202, + 241, + 222 + ], + "type": "interline_equation", + "content": "\\min _ {G} \\max _ {D} E _ {x \\sim p _ {\\text {d a t a}} (x)} [ \\log D (z) ] +", + "image_path": "3c81cf688d23b9a5c258f1a910d4b6375efaa9b4bf011b786cf38f50312871eb.jpg" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 98, + 225, + 203, + 243 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 98, + 225, + 203, + 243 + ], + "spans": [ + { + "bbox": [ + 98, + 225, + 203, + 243 + ], + "type": "interline_equation", + "content": "E _ {x \\sim q _ {(z | x)}} [ \\log (1 - D (z)) ]", + "image_path": "93059c0b0200dfbf1341842c23946121be5bcecbebeabde6e3839117f7341335.jpg" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 41, + 248, + 298, + 283 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 41, + 248, + 298, + 283 + ], + "spans": [ + { + "bbox": [ + 41, + 248, + 298, + 283 + ], + "type": "text", + "content": "Through this adversarial learning method, the model can capture the characteristics of minority classes more accurately and avoid the overfitting problem of minority class samples." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 41, + 288, + 298, + 321 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 41, + 288, + 298, + 321 + ], + "spans": [ + { + "bbox": [ + 41, + 288, + 298, + 321 + ], + "type": "text", + "content": "In summary, this study combines deep probabilistic graph models, variational reasoning, and adversarial learning methods to optimize imbalanced data classification tasks." 
+ } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 132, + 329, + 212, + 340 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 329, + 212, + 340 + ], + "spans": [ + { + "bbox": [ + 132, + 329, + 212, + 340 + ], + "type": "text", + "content": "III. EXPERIMENT" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 41, + 346, + 95, + 357 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 41, + 346, + 95, + 357 + ], + "spans": [ + { + "bbox": [ + 41, + 346, + 95, + 357 + ], + "type": "text", + "content": "A. Dataset" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 41, + 360, + 298, + 482 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 41, + 360, + 298, + 482 + ], + "spans": [ + { + "bbox": [ + 41, + 360, + 298, + 482 + ], + "type": "text", + "content": "This study employs the Kaggle \"Credit Card Fraud Detection\" dataset, consisting of 284,807 credit card transactions from a European institution. Of these transactions, 492 are labeled as fraudulent, indicating a highly imbalanced class distribution (approximately 1:577). Each record contains 30 features, including 28 anonymized components derived from Principal Component Analysis (PCA), along with transaction time and amount. Personally identifiable information has been removed, leaving only numerical features, which were preprocessed through normalization, outlier detection, and data augmentation." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 41, + 487, + 298, + 685 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 41, + 487, + 298, + 685 + ], + "spans": [ + { + "bbox": [ + 41, + 487, + 298, + 685 + ], + "type": "text", + "content": "Given the severe class imbalance, direct application of conventional classification models often leads to bias toward the majority class, compromising fraud detection. 
To address this challenge, we employed various sampling strategies, including under-sampling, over-sampling, and the Synthetic Minority Over-sampling Technique (SMOTE), to generate synthetic samples for the minority class and improve representation. We also evaluated the impact of different sampling methods on model stability and performance. The dataset was split into " + }, + { + "bbox": [ + 41, + 487, + 298, + 685 + ], + "type": "inline_equation", + "content": "70\\%" + }, + { + "bbox": [ + 41, + 487, + 298, + 685 + ], + "type": "text", + "content": " training, " + }, + { + "bbox": [ + 41, + 487, + 298, + 685 + ], + "type": "inline_equation", + "content": "15\\%" + }, + { + "bbox": [ + 41, + 487, + 298, + 685 + ], + "type": "text", + "content": " validation, and " + }, + { + "bbox": [ + 41, + 487, + 298, + 685 + ], + "type": "inline_equation", + "content": "15\\%" + }, + { + "bbox": [ + 41, + 487, + 298, + 685 + ], + "type": "text", + "content": " test sets. Evaluation metrics included Precision, Recall, F1-score, and the Area Under the Receiver Operating Characteristic Curve (AUC-ROC). Comparative experiments with different data augmentation techniques demonstrated that integrating probabilistic modeling with these strategies substantially enhances fraud detection and reduces false positives, thereby improving the model's reliability in practical applications." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 312, + 54, + 408, + 65 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 312, + 54, + 408, + 65 + ], + "spans": [ + { + "bbox": [ + 312, + 54, + 408, + 65 + ], + "type": "text", + "content": "B. 
Experiment Result" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 310, + 68, + 569, + 299 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 68, + 569, + 299 + ], + "spans": [ + { + "bbox": [ + 310, + 68, + 569, + 299 + ], + "type": "text", + "content": "This study primarily compares deep probabilistic graphical models (DPGMs) with several advanced imbalanced data classification methods to validate their effectiveness. First, we select the generative adversarial network (GAN)-based methods, such as WGANGP-SMOTE and ADASYN-GAN, which leverage GANs to synthesize minority class samples and mitigate data imbalance. Second, we evaluate class-adaptive ensemble learning methods, including Balanced Random Forest (BRF) and XGBoost-Cost Sensitive, which enhance minority class learning by adjusting sampling strategies or modifying loss functions. Additionally, we compare attention-based imbalanced classification methods, such as Self-Attention Anomaly Detection (SAAD) and Hierarchical Attention Networks (HAN), which have demonstrated strong anomaly detection capabilities in credit card fraud detection and similar tasks. Through these comparative experiments, we aim to comprehensively assess the advantages of deep probabilistic graphical models in minority class representation learning, generalization ability, and classification performance." + } + ] + } + ], + "index": 10 + }, + { + "type": "table", + "bbox": [ + 312, + 320, + 569, + 418 + ], + "blocks": [ + { + "bbox": [ + 378, + 310, + 501, + 320 + ], + "lines": [ + { + "bbox": [ + 378, + 310, + 501, + 320 + ], + "spans": [ + { + "bbox": [ + 378, + 310, + 501, + 320 + ], + "type": "text", + "content": "Table 1. 
Integration Testing 1" + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 312, + 320, + 569, + 418 + ], + "lines": [ + { + "bbox": [ + 312, + 320, + 569, + 418 + ], + "spans": [ + { + "bbox": [ + 312, + 320, + 569, + 418 + ], + "type": "table", + "html": "
ModelAUCPrecisionRecallF1-Score
GAN [20]0.8420.7160.6540.684
ADASYN [21]0.8560.7290.6680.697
SMOTE [22]0.8710.7420.6830.711
BRF [23]0.8890.7640.7210.742
XGBOOST-Cost [24]0.9030.7790.7350.757
SAAD [25]0.9150.7930.7510.771
HAN [26]0.9270.8060.7680.786
Ours0.9410.8220.7850.803
", + "image_path": "ffd677e4e9724b33fd97b088d2b56b347e397c7cd6d17964076bb5ea1b746ab4.jpg" + } + ] + } + ], + "index": 12, + "angle": 0, + "type": "table_body" + } + ], + "index": 12 + }, + { + "bbox": [ + 310, + 419, + 568, + 518 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 419, + 568, + 518 + ], + "spans": [ + { + "bbox": [ + 310, + 419, + 568, + 518 + ], + "type": "text", + "content": "Our proposed deep probabilistic graphical model (DPGM) outperforms all compared methods on every evaluation metric, demonstrating superior generalization in imbalanced classification. With an AUC of 0.941, it clearly surpasses traditional oversampling (e.g., SMOTE, ADASYN) and ensemble methods (e.g., BRF, XGBoost-Cost Sensitive). Importantly, it achieves a Recall of 0.785 while maintaining a Precision of 0.822, reflecting its effectiveness in detecting minority class samples without overly biasing the model." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 310, + 522, + 569, + 644 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 522, + 569, + 644 + ], + "spans": [ + { + "bbox": [ + 310, + 522, + 569, + 644 + ], + "type": "text", + "content": "DPGMs model the latent distribution of minority samples more effectively than conventional oversampling, thereby reducing overfitting. Compared to attention-based methods (e.g., SAAD, HAN), our model delivers a higher F1-score (0.803 versus 0.786), illustrating the benefits of uncertainty-aware probabilistic modeling. Overall, these results confirm that combining deep probabilistic modeling with variational inference optimizes class distribution and enhances minority class discrimination, offering a robust solution for imbalanced data classification. Figure 2 presents the corresponding loss function trajectory." 
+ } + ] + } + ], + "index": 14 + } + ], + "discarded_blocks": [], + "page_size": [ + 612, + 792 + ], + "page_idx": 2 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 51, + 64, + 288, + 209 + ], + "blocks": [ + { + "bbox": [ + 51, + 64, + 288, + 209 + ], + "lines": [ + { + "bbox": [ + 51, + 64, + 288, + 209 + ], + "spans": [ + { + "bbox": [ + 51, + 64, + 288, + 209 + ], + "type": "image", + "image_path": "e739f93a7fbde9cf2a99b06d3897132dc8dbed0714b55865b79655a88af9e655.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 108, + 226, + 247, + 238 + ], + "lines": [ + { + "bbox": [ + 108, + 226, + 247, + 238 + ], + "spans": [ + { + "bbox": [ + 108, + 226, + 247, + 238 + ], + "type": "text", + "content": "Figure 2. Loss function drop graph" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "bbox": [ + 43, + 243, + 297, + 386 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 43, + 243, + 297, + 386 + ], + "spans": [ + { + "bbox": [ + 43, + 243, + 297, + 386 + ], + "type": "text", + "content": "From the loss function decline curve, both the training loss (Train) and test loss (Test) exhibit a clear downward trend during training iterations. This indicates that the model continuously learns features and optimizes parameters to effectively reduce errors. In the initial phase of training (between 5,000 and 25,000 iterations), the loss decreases at the fastest rate, suggesting that the model rapidly learns data representations and significantly improves classification performance. However, as the number of iterations increases, the rate of loss reduction gradually slows down and stabilizes after approximately 125,000 iterations. This trend implies that the model is approaching convergence, where further optimization yields diminishing returns." 
+ } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 43, + 392, + 297, + 675 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 43, + 392, + 297, + 675 + ], + "spans": [ + { + "bbox": [ + 43, + 392, + 297, + 675 + ], + "type": "text", + "content": "A comparison of the training and test loss curves reveals that the test loss consistently remains lower than the training loss, and both curves follow a similar trajectory. This observation suggests that the model demonstrates good generalization ability without exhibiting significant overfitting. If the training loss were substantially lower than the test loss, it would indicate that the model performs well on training data but struggles to generalize to unseen data. However, the current loss curves do not display such a pattern, implying that the applied regularization strategies and optimization methods effectively mitigate overfitting. Furthermore, the test loss decreases at a rate similar to that of the training loss in the initial stages, further validating the model's stable learning process. Overall, these experimental results confirm that the model successfully optimizes the loss function during training, leading to a substantial reduction in both training and test errors ultimately reaching a relatively low level. This outcome suggests that the chosen training strategy, hyperparameter configuration, and optimization techniques are effective, allowing the model to learn the data distribution efficiently while maintaining strong generalization performance. Additionally, the stabilization of the loss curves indicates that the training process has effectively converged, suggesting that training can be halted or fine-tuned further to ensure optimal performance on the test set. Finally, this paper also gives the T-SNE results after training, as shown in Figure 3." 
+ } + ] + } + ], + "index": 3 + }, + { + "type": "image", + "bbox": [ + 317, + 55, + 580, + 277 + ], + "blocks": [ + { + "bbox": [ + 317, + 55, + 580, + 277 + ], + "lines": [ + { + "bbox": [ + 317, + 55, + 580, + 277 + ], + "spans": [ + { + "bbox": [ + 317, + 55, + 580, + 277 + ], + "type": "image", + "image_path": "6ad69f33204c10e92da4741df3cc69002914239055daa6956253b03c994af45e.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 358, + 286, + 523, + 297 + ], + "lines": [ + { + "bbox": [ + 358, + 286, + 523, + 297 + ], + "spans": [ + { + "bbox": [ + 358, + 286, + 523, + 297 + ], + "type": "text", + "content": "Figure 3. T-SNE result map after training" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_caption" + } + ], + "index": 4 + }, + { + "bbox": [ + 314, + 302, + 567, + 423 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 302, + 567, + 423 + ], + "spans": [ + { + "bbox": [ + 314, + 302, + 567, + 423 + ], + "type": "text", + "content": "From the T-SNE results, it is evident that after training, the data points form distinct cluster-like distributions in the two-dimensional space, indicating that the model has successfully learned the feature differences between different classes. As observed in the visualization, the two categories (represented in blue and red) are well separated, suggesting that the model has developed strong discriminative capabilities in the high-dimensional feature space. The presence of a clear boundary between the classes demonstrates that the model effectively extracts distinguishing features without causing sample overlap, thereby validating its effectiveness." 
+ } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 314, + 429, + 567, + 528 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 429, + 567, + 528 + ], + "spans": [ + { + "bbox": [ + 314, + 429, + 567, + 528 + ], + "type": "text", + "content": "Furthermore, the overall data distribution demonstrates that the T-SNE dimensionality reduction retains intra-class compactness while ensuring inter-class separability. The blue and red data points are well-clustered in distinct regions without significant overlap, indicating that the model effectively distinguishes between different categories in the feature space. Even when dealing with an imbalanced dataset, the model successfully learns the distribution patterns of the minority class." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 314, + 533, + 567, + 675 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 533, + 567, + 675 + ], + "spans": [ + { + "bbox": [ + 314, + 533, + 567, + 675 + ], + "type": "text", + "content": "However, while the T-SNE results illustrate a clear class separation, further quantitative evaluation is necessary to assess the robustness of the classification boundaries. For instance, if significant distribution shifts occur in certain test data samples, it may indicate that the model is still susceptible to overfitting. Additionally, since T-SNE is a nonlinear dimensionality reduction method, it may exaggerate the separation between classes, meaning that the actual decision boundaries in the high-dimensional space may not be as well-defined as they appear in the visualization. Therefore, a comprehensive evaluation incorporating classification metrics such as Precision, Recall, and AUC is essential to fully validate the model's generalization performance." 
+ } + ] + } + ], + "index": 8 + } + ], + "discarded_blocks": [], + "page_size": [ + 612, + 792 + ], + "page_idx": 3 + }, + { + "para_blocks": [ + { + "bbox": [ + 132, + 54, + 212, + 64 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 54, + 212, + 64 + ], + "spans": [ + { + "bbox": [ + 132, + 54, + 212, + 64 + ], + "type": "text", + "content": "IV. CONCLUSION" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 41, + 69, + 298, + 224 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 41, + 69, + 298, + 224 + ], + "spans": [ + { + "bbox": [ + 41, + 69, + 298, + 224 + ], + "type": "text", + "content": "This study proposes an imbalanced data classification method based on deep probabilistic graphical models (DPGMs) and validates its effectiveness through experiments on a credit card fraud detection dataset. The experimental results demonstrate that the proposed method outperforms traditional oversampling techniques, ensemble learning approaches, and attention-based models in key metrics such as AUC and F1-score, confirming the effectiveness of probabilistic modeling in handling imbalanced classification tasks. By integrating variational inference, class-weight adjustment, and adversarial learning mechanisms, our model more accurately captures the feature distribution of the minority class, enhancing the classifier's discriminative ability while mitigating the overfitting issues commonly observed in traditional methods." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 41, + 228, + 298, + 448 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 41, + 228, + 298, + 448 + ], + "spans": [ + { + "bbox": [ + 41, + 228, + 298, + 448 + ], + "type": "text", + "content": "Despite the promising performance of our approach in imbalanced data classification, several aspects warrant further improvement. 
For instance, in cases of extreme imbalance, the minority class samples may still provide insufficient information, potentially limiting the model's generalization capability. Additionally, deep probabilistic graphical models involve high computational complexity, requiring extensive sampling and variational inference steps during training, which may impact deployment efficiency. Therefore, future research could focus on optimizing the computational efficiency of probabilistic modeling to enhance the model's adaptability across different data distributions. Several directions can be explored in future research. More efficient Bayesian optimization methods can be investigated to reduce the computational cost of DPGMs, making them applicable to larger-scale imbalanced datasets. Furthermore, in practical applications, federated learning frameworks can be incorporated to enable cross-institutional model training while preserving data privacy, thereby enhancing the applicability of imbalanced classification methods in real-world scenarios." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 142, + 455, + 196, + 465 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 142, + 455, + 196, + 465 + ], + "spans": [ + { + "bbox": [ + 142, + 455, + 196, + 465 + ], + "type": "text", + "content": "REFERENCES" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 42, + 469, + 298, + 711 + ], + "type": "list", + "angle": 0, + "index": 11, + "blocks": [ + { + "bbox": [ + 42, + 469, + 297, + 506 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 42, + 469, + 297, + 506 + ], + "spans": [ + { + "bbox": [ + 42, + 469, + 297, + 506 + ], + "type": "text", + "content": "[1] Y. Cheng, Z. Xu, Y. Chen, Y. Wang, Z. Lin and J. Liu, \"A Deep Learning Framework Integrating CNN and BiLSTM for Financial Systemic Risk Analysis and Prediction,\" arXiv preprint arXiv:2502.06847, 2025." 
+ } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 43, + 508, + 296, + 536 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 43, + 508, + 296, + 536 + ], + "spans": [ + { + "bbox": [ + 43, + 508, + 296, + 536 + ], + "type": "text", + "content": "[2] J. Liu, \"Multimodal Data-Driven Factor Models for Stock Market Forecasting,\" Journal of Computer Technology and Software, vol. 4, no. 2, 2025, https://doi.org/10.5281/zenodo.14984969." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 43, + 537, + 296, + 574 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 43, + 537, + 296, + 574 + ], + "spans": [ + { + "bbox": [ + 43, + 537, + 296, + 574 + ], + "type": "text", + "content": "[3] Y. Deng, \"A Hybrid Network Congestion Prediction Method Integrating Association Rules and LSTM for Enhanced Spatiotemporal Forecasting,\" Transactions on Computational and Scientific Methods, vol. 5, no. 2, 2025, https://doi.org/10.5281/zenodo.14912727." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 43, + 576, + 297, + 612 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 43, + 576, + 297, + 612 + ], + "spans": [ + { + "bbox": [ + 43, + 576, + 297, + 612 + ], + "type": "text", + "content": "[4] P. Feng, \"Hybrid BiLSTM-Transformer Model for Identifying Fraudulent Transactions in Financial Systems,\" Journal of Computer Science and Software Applications, vol. 5, no. 3, 2025, https://doi.org/10.5281/zenodo.14985026." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 43, + 614, + 296, + 643 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 43, + 614, + 296, + 643 + ], + "spans": [ + { + "bbox": [ + 43, + 614, + 296, + 643 + ], + "type": "text", + "content": "[5] X. Du, \"Optimized Convolutional Neural Network for Intelligent Financial Statement Anomaly Detection,\" Journal of Computer Technology and Software, vol. 3, no. 9, pp. 11-15, 2024." 
+ } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 43, + 644, + 298, + 672 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 43, + 644, + 298, + 672 + ], + "spans": [ + { + "bbox": [ + 43, + 644, + 298, + 672 + ], + "type": "text", + "content": "[6] S. Moolchandani, \"Advancing Credit Risk Management: Embracing Probabilistic Graphical Models in Banking,\" International Journal of Science and Research (IJSR), vol. 13, no. 6, pp. 74-80, 2024." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 43, + 673, + 296, + 711 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 43, + 673, + 296, + 711 + ], + "spans": [ + { + "bbox": [ + 43, + 673, + 296, + 711 + ], + "type": "text", + "content": "[7] S. Arya, T. Rahman and V. Gogate, \"Learning to Solve the Constrained Most Probable Explanation Task in Probabilistic Graphical Models,\" Proceedings of the 2024 International Conference on Artificial Intelligence and Statistics (AISTATS), PMLR, pp. 2791-2799, 2024." + } + ] + } + ], + "index": 10 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 313, + 53, + 568, + 711 + ], + "type": "list", + "angle": 0, + "index": 31, + "blocks": [ + { + "bbox": [ + 313, + 53, + 567, + 80 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 53, + 567, + 80 + ], + "spans": [ + { + "bbox": [ + 313, + 53, + 567, + 80 + ], + "type": "text", + "content": "[8] J. Hu, T. An, Z. Yu, J. Du and Y. Luo, \"Contrastive Learning for Cold Start Recommendation with Adaptive Feature Fusion,\" arXiv preprint arXiv:2502.03664, 2025." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 314, + 82, + 568, + 110 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 82, + 568, + 110 + ], + "spans": [ + { + "bbox": [ + 314, + 82, + 568, + 110 + ], + "type": "text", + "content": "[9] J. 
Zhan, \"Elastic Scheduling of Micro-Modules in Edge Computing Based on LSTM Prediction,\" Journal of Computer Technology and Software, vol. 4, no. 2, 2025, https://doi.org/10.5281/zenodo.14984949." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 313, + 112, + 567, + 148 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 112, + 567, + 148 + ], + "spans": [ + { + "bbox": [ + 313, + 112, + 567, + 148 + ], + "type": "text", + "content": "[10] Q. Sun and S. Duan, \"User Intent Prediction and Response in Human-Computer Interaction via BiLSTM,\" Journal of Computer Science and Software Applications, vol. 5, no. 3, 2025, https://doi.org/10.5281/zenodo.14985042." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 314, + 150, + 567, + 178 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 150, + 567, + 178 + ], + "spans": [ + { + "bbox": [ + 314, + 150, + 567, + 178 + ], + "type": "text", + "content": "[11] Y. Wang, \"Time-Series Premium Risk Prediction via Bidirectional Transformer,\" Transactions on Computational and Scientific Methods, vol. 5, no. 2, 2025, https://doi.org/10.5281/zenodo.14955913." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 314, + 180, + 567, + 217 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 180, + 567, + 217 + ], + "spans": [ + { + "bbox": [ + 314, + 180, + 567, + 217 + ], + "type": "text", + "content": "[12] T. Zhou, Z. Xu and J. Du, \"Efficient Market Signal Prediction for Blockchain HFT with Temporal Convolutional Networks,\" Transactions on Computational and Scientific Methods, vol. 5, no. 2, 2025, https://doi.org/10.5281/zenodo.14912719." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 314, + 219, + 568, + 255 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 219, + 568, + 255 + ], + "spans": [ + { + "bbox": [ + 314, + 219, + 568, + 255 + ], + "type": "text", + "content": "[13] X. 
Wang, \"Data Mining Framework Leveraging Stable Diffusion: A Unified Approach for Classification and Anomaly Detection,\" Journal of Computer Technology and Software, vol. 4, no. 1, 2025, https://doi.org/10.5281/zenodo.14843181." + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 314, + 258, + 567, + 294 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 258, + 567, + 294 + ], + "spans": [ + { + "bbox": [ + 314, + 258, + 567, + 294 + ], + "type": "text", + "content": "[14] X. Sun, \"Dynamic Distributed Scheduling for Data Stream Computing: Balancing Task Delay and Load Efficiency\", Journal of Computer Technology and Software, vol. 4, no. 1, 2025, https://doi.org/10.5281/zenodo.14785261." + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 314, + 296, + 567, + 342 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 296, + 567, + 342 + ], + "spans": [ + { + "bbox": [ + 314, + 296, + 567, + 342 + ], + "type": "text", + "content": "[15] X. Yan, J. Du, L. Wang, Y. Liang, J. Hu and B. Wang, \"The Synergistic Role of Deep Learning and Neural Architecture Search in Advancing Artificial Intelligence,\" Proceedings of the 2024 International Conference on Electronics and Devices, Computational Science (ICEDCS), pp. 452-456, Sep. 2024." + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 314, + 343, + 567, + 371 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 343, + 567, + 371 + ], + "spans": [ + { + "bbox": [ + 314, + 343, + 567, + 371 + ], + "type": "text", + "content": "[16] P. Li, \"Improved Transformer for Cross-Domain Knowledge Extraction with Feature Alignment,\" Journal of Computer Science and Software Applications, vol. 5, no. 2, 2024." 
+ } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 314, + 373, + 568, + 408 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 373, + 568, + 408 + ], + "spans": [ + { + "bbox": [ + 314, + 373, + 568, + 408 + ], + "type": "text", + "content": "[17] S. Wang, R. Zhang and X. Shi, \"Generative UI Design with Diffusion Models: Exploring Automated Interface Creation and Human-Computer Interaction,\" Transactions on Computational and Scientific Methods, vol. 5, no. 3, 2025." + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 314, + 411, + 567, + 439 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 411, + 567, + 439 + ], + "spans": [ + { + "bbox": [ + 314, + 411, + 567, + 439 + ], + "type": "text", + "content": "[18] J. Gao, S. Lyu, G. Liu, B. Zhu, H. Zheng and X. Liao, \"A Hybrid Model for Few-Shot Text Classification Using Transfer and Meta-Learning,\" arXiv preprint arXiv:2502.09086, 2025." + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 314, + 441, + 567, + 478 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 441, + 567, + 478 + ], + "spans": [ + { + "bbox": [ + 314, + 441, + 567, + 478 + ], + "type": "text", + "content": "[19] Y. Yao, \"Time-Series Nested Reinforcement Learning for Dynamic Risk Control in Nonlinear Financial Markets,\" Transactions on Computational and Scientific Methods, vol. 5, no. 1, 2025, https://doi.org/10.5281/zenodo.14677117." + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 314, + 479, + 567, + 516 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 479, + 567, + 516 + ], + "spans": [ + { + "bbox": [ + 314, + 479, + 567, + 516 + ], + "type": "text", + "content": "[20] I. Goodfellow, J. Pouget-Abadie, M. Mirza, B. Xu, D. Warde-Farley, S. Ozair, A. Courville and Y. Bengio, \"Generative Adversarial Nets,\" Proceedings of the 27th Advances in Neural Information Processing Systems (NeurIPS), pp. 1–9, 2014." 
+ } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 314, + 517, + 567, + 555 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 517, + 567, + 555 + ], + "spans": [ + { + "bbox": [ + 314, + 517, + 567, + 555 + ], + "type": "text", + "content": "[21] H. He and Y. Bai, \"ADASYN: Adaptive Synthetic Sampling Approach for Imbalanced Learning,\" Proceedings of the IEEE International Joint Conference on Neural Networks (IEEE World Congress on Computational Intelligence), pp. 1322-1328, 2008." + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 314, + 556, + 568, + 585 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 556, + 568, + 585 + ], + "spans": [ + { + "bbox": [ + 314, + 556, + 568, + 585 + ], + "type": "text", + "content": "[22] N. V. Chawla, K. W. Bowyer, L. O. Hall and W. P. Kegelmeyer, \"SMOTE: Synthetic Minority Over-Sampling Technique,\" Journal of Artificial Intelligence Research, vol. 16, pp. 321-357, 2002." + } + ] + } + ], + "index": 26 + }, + { + "bbox": [ + 314, + 586, + 567, + 605 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 586, + 567, + 605 + ], + "spans": [ + { + "bbox": [ + 314, + 586, + 567, + 605 + ], + "type": "text", + "content": "[23] A. Liaw and M. Wiener, \"Classification and Regression by randomForest,\" R News, vol. 2, no. 3, pp. 18-22, 2002." + } + ] + } + ], + "index": 27 + }, + { + "bbox": [ + 314, + 606, + 567, + 634 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 606, + 567, + 634 + ], + "spans": [ + { + "bbox": [ + 314, + 606, + 567, + 634 + ], + "type": "text", + "content": "[24] T. Chen and C. Guestrin, \"XGBoost: A Scalable Tree Boosting System,\" Proceedings of the 22nd ACM SIGKDD International Conference on Knowledge Discovery and Data Mining, pp. 785-794, 2016." 
+ } + ] + } + ], + "index": 28 + }, + { + "bbox": [ + 314, + 636, + 567, + 664 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 636, + 567, + 664 + ], + "spans": [ + { + "bbox": [ + 314, + 636, + 567, + 664 + ], + "type": "text", + "content": "[25] Y. Zhou and R. C. Paffenroth, \"Self-Attention Anomaly Detection,\" Proceedings of the 25th ACM SIGKDD International Conference on Knowledge Discovery and Data Mining, pp. 2774-2782, 2019." + } + ] + } + ], + "index": 29 + }, + { + "bbox": [ + 314, + 666, + 567, + 711 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 666, + 567, + 711 + ], + "spans": [ + { + "bbox": [ + 314, + 666, + 567, + 711 + ], + "type": "text", + "content": "[26] Z. Yang, D. Yang, C. Dyer, X. He, A. Smola and E. Hovy, \"Hierarchical Attention Networks for Document Classification,\" Proceedings of the 2016 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, pp. 1480-1489, 2016." + } + ] + } + ], + "index": 30 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [], + "page_size": [ + 612, + 792 + ], + "page_idx": 4 + } + ], + "_backend": "vlm", + "_version_name": "2.6.4" +} \ No newline at end of file diff --git a/data/2025/2504_05xxx/2504.05786/e603c6f4-386e-4380-abf7-2f18915b0ee6_content_list.json b/data/2025/2504_05xxx/2504.05786/e603c6f4-386e-4380-abf7-2f18915b0ee6_content_list.json new file mode 100644 index 0000000000000000000000000000000000000000..169da4f1ee1b5d0f3d2140b326e301675d06a90e --- /dev/null +++ b/data/2025/2504_05xxx/2504.05786/e603c6f4-386e-4380-abf7-2f18915b0ee6_content_list.json @@ -0,0 +1,1200 @@ +[ + { + "type": "text", + "text": "How to Enable LLM with 3D Capacity? 
A Survey of Spatial Reasoning in LLM", + "text_level": 1, + "bbox": [ + 98, + 108, + 898, + 131 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Jirong Zha $^{1*}$ , Yuxuan Fan $^{2*}$ , Xiao Yang $^{2}$ , Chen Gao $^{1\\dagger}$ , Xinlei Chen $^{1\\dagger}$", + "bbox": [ + 192, + 147, + 799, + 167 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "$^{1}$ Tsinghua University", + "bbox": [ + 413, + 170, + 583, + 188 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "2The Hong Kong University of Science and Technology (Guang Zhou)", + "bbox": [ + 220, + 189, + 777, + 207 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "zhajirong23@mails.tsinghua.edu.cn, {yfan546, xyang856}@connect.hkust-gz.edu.cn, chgao96@gmail.com, chen.xinlei@sz.tsinghua.edu.cn", + "bbox": [ + 161, + 209, + 836, + 243 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Abstract", + "text_level": 1, + "bbox": [ + 245, + 271, + 323, + 287 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "3D spatial understanding is essential in real-world applications such as robotics, autonomous vehicles, virtual reality, and medical imaging. Recently, Large Language Models (LLMs), having demonstrated remarkable success across various domains, have been leveraged to enhance 3D understanding tasks, showing potential to surpass traditional computer vision methods. In this survey, we present a comprehensive review of methods integrating LLMs with 3D spatial understanding. We propose a taxonomy that categorizes existing methods into three branches: image-based methods deriving 3D understanding from 2D visual data, point cloud-based methods working directly with 3D representations, and hybrid modality-based methods combining multiple data streams. We systematically review representative methods along these categories, covering data representations, architectural modifications, and training strategies that bridge textual and 3D modalities. 
Finally, we discuss current limitations, including dataset scarcity and computational challenges, while highlighting promising research directions in spatial perception, multi-modal fusion, and real-world applications.", + "bbox": [ + 114, + 297, + 455, + 632 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "1 Introduction", + "text_level": 1, + "bbox": [ + 83, + 657, + 223, + 672 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Large Language Models (LLMs) have evolved from basic neural networks to advanced transformer models like BERT [Kenton and Toutanova, 2019] and GPT [Radford, 2018], originally excelling at language tasks by learning from vast text datasets. Recent advancements, however, have extended these models beyond pure linguistic processing to encompass multimodal ability (In this paper, when we refer to LLMs, we specifically mean those that integrate multimodal functions). Their ability to capture complex patterns and relationships [Chen et al., 2024a] now holds promise for spatial reasoning tasks [Ma et al., 2024b]. By applying these enhanced models to challenges such as understanding 3D object relationships and spatial navigation, we open up new opportunities for advancing fields like robotics, computer vision, and augmented reality [Gao et al., 2024].", + "bbox": [ + 81, + 680, + 488, + 888 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/1c4b8c1a8e39901fceb895fcf642206dfa4b227423055dcf9d75196664ca28d0.jpg", + "image_caption": [ + "Figure 1: Large Language Models can acquire 3D spatial reasoning capabilities through various input sources including multi-view images, RGB-D images, point clouds, and hybrid modalities, enabling the processing and understanding of three-dimensional information." 
+ ], + "image_footnote": [], + "bbox": [ + 514, + 271, + 908, + 411 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "At the same time, 3D data and 3D modeling techniques have seen significant developments [Ma et al., 2024c], finding extensive applications in virtual and augmented reality, robotics, autonomous vehicles, gaming, medical imaging, and more. Unlike traditional two-dimensional images, 3D data provides a richer view of objects and environments, capturing essential spatial relationships and geometry. Such information is critical for tasks like scene reconstruction, object manipulation, and autonomous navigation, where merely text-based descriptions or 2D representations may fall short of conveying the necessary depth or spatial context.", + "bbox": [ + 506, + 505, + 913, + 657 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "LLMs help Spatial Understanding. Bringing these two fields together—powerful language understanding from LLMs and the spatial realism of 3D data—offers the potential for highly capable, context-aware systems. From a linguistic perspective, real-world descriptions often reference physical arrangement, orientation, or manipulations of objects in space. Text alone can be imprecise or ambiguous about size, shape, or relative positioning unless one can integrate a robust spatial or visual understanding. Consequently, there is growing interest in enhancing LLMs with a \"3D capacity\" that enables them to interpret, reason, and even generate three-dimensional representations based on natural language prompts. 
Such an integrated approach opens up exciting prospects: robots that can follow language instructions more effectively by grounding their commands in 3D context, architects who quickly prototype 3D layouts from textual descriptions, game design", + "bbox": [ + 506, + 666, + 915, + 891 + ], + "page_idx": 0 + }, + { + "type": "aside_text", + "text": "arXiv:2504.05786v1 [cs.CV] 8 Apr 2025", + "bbox": [ + 22, + 268, + 60, + 700 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "ers who generate immersive environments for narrative-based experiences, and many other creative applications yet to be envisioned.", + "bbox": [ + 81, + 68, + 486, + 109 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Motivation. Although LLMs have been increasingly applied in 3D-related tasks, and Ma et al. [2024b] provided a systematic overview of this field, the rapid advancement of this domain has led to numerous new developments in recent months, necessitating an up-to-date survey that captures these recent breakthroughs. Integrating 3D capacity into LLMs faces several key challenges: (1) the scarcity of high-quality 3D datasets compared to abundant text corpora; (2) the fundamental mismatch between sequential text data and continuous 3D spatial structures, requiring specialized architectural adaptations; and (3) the intensive computational requirements for processing 3D data at scale. While early attempts at combining language and 3D have shown promise, current approaches often remain limited in scope, scalability, and generalization capability. Most existing solutions are domain-specific and lack the broad applicability characteristic of text-based LLMs.", + "bbox": [ + 81, + 113, + 486, + 335 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Contribution. The contributions of this work are summarized in the following three aspects: (1) A structured taxonomy. 
We provide a timely and comprehensive survey that distinguishes itself from the systematic overview offered by Ma et al. [2024b] by presenting a novel perspective on LLM applications in 3D-related tasks: our work constructs a structured taxonomy that categorizes existing research into three primary groups (Figure 2) and offers a forward-looking analysis of the latest breakthroughs, thereby underscoring our unique contributions and the significance of our approach in advancing the field. (2) A comprehensive review. Building on the proposed taxonomy, we systematically review the current research progress on LLMs for spatial reasoning tasks. (3) Future directions. We highlight the remaining limitations of existing works and suggest potential directions for future research.", + "bbox": [ + 81, + 339, + 486, + 547 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "2 Preliminary", + "text_level": 1, + "bbox": [ + 83, + 561, + 218, + 578 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "2.1 Large Language Models", + "text_level": 1, + "bbox": [ + 83, + 580, + 313, + 595 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Large Language Models (LLMs) have evolved from early word embeddings to context-aware models like BERT [Kenton and Toutanova, 2019]. Generative transformers such as GPT series [Radford, 2018], have further advanced text generation and few-shot learning. However, these models often struggle with spatial reasoning due to their focus on textual patterns, prompting efforts to integrate external spatial knowledge [Fu et al., 2024].", + "bbox": [ + 81, + 598, + 486, + 709 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Vision-Language Models (VLMs) extend LLMs by aligning visual data with text. Early examples like CLIP [Radford et al., 2021] leverage co-attentional architectures and contrastive learning, while later models such as BLIP [Li et al., 2022] refine these techniques with larger datasets. 
Yet, most VLMs process only 2D data, limiting their ability to capture detailed 3D spatial configurations. Integrating 3D context via depth maps, point clouds, or voxels remains challenging, motivating ongoing research toward more robust spatial intelligence.", + "bbox": [ + 81, + 709, + 486, + 835 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "2.2 3D Data Structures", + "text_level": 1, + "bbox": [ + 83, + 843, + 276, + 857 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "3D data has different structures, which are essential for understanding the three-dimensional world, and common methods", + "bbox": [ + 83, + 859, + 486, + 888 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "include point clouds, voxel grids, polygonal meshes, neural fields, hybrid representations, and 3D Gaussian splatting. Point clouds represent shapes using discrete points, typically denoted as", + "bbox": [ + 506, + 68, + 913, + 125 + ], + "page_idx": 1 + }, + { + "type": "equation", + "text": "\n$$\nP = \\left\\{p _ {i} \\in \\mathbb {R} ^ {3} \\mid i = 1, \\dots , N \\right\\},\n$$\n", + "text_format": "latex", + "bbox": [ + 604, + 128, + 815, + 148 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "which are storage-efficient but lack surface topology. Voxel grids partition space into uniform cubes, with each voxel $V(i,j,k)$ storing occupancy or distance values, providing detailed structure at the expense of increased memory usage at higher resolutions. Polygonal meshes compactly encode complex geometries through a set of vertices $\\{v_{i}\\}$ and faces $\\{F_j\\}$ , though their unstructured and non-differentiable nature poses challenges for integration with neural networks. 
Neural fields offer an implicit approach by modeling 3D shapes as continuous and differentiable functions, such as", + "bbox": [ + 508, + 152, + 911, + 292 + ], + "page_idx": 1 + }, + { + "type": "equation", + "text": "\n$$\nf _ {\\theta}: \\mathbb {R} ^ {3} \\rightarrow (c, \\sigma),\n$$\n", + "text_format": "latex", + "bbox": [ + 650, + 297, + 769, + 314 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "which maps spatial coordinates to color $c$ and density $\\sigma$ . Hybrid representations combine these neural fields with traditional volumetric methods (e.g., integrating $f_{\\theta}$ with voxel grids) to achieve high-quality, real-time rendering. Meanwhile, 3D Gaussian splatting enhances point clouds by associating each point $p_i$ with a covariance matrix $\\Sigma_i$ and color $c_i$ , efficiently encoding radiance information for rendering. Each method has its unique strengths and trade-offs, making them suitable for different applications in 3D understanding and generation.", + "bbox": [ + 506, + 319, + 913, + 459 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "2.3 Proposed taxonomy", + "text_level": 1, + "bbox": [ + 508, + 468, + 705, + 484 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "We propose a taxonomy that classifies 3D-LLM research into three main categories based on input modalities and integration strategies, as shown in Figure 1: Image-based spatial reasoning encompasses approaches that derive 3D understanding from 2D images. This includes multi-view methods that reconstruct 3D scenes, RGB-D images providing explicit depth information, monocular 3D perception inferring depth from single views, and medical imaging applications. While these approaches benefit from readily available image data and existing vision models, they may struggle with occlusions and viewpoint limitations. 
Point cloud-based spatial reasoning works directly with 3D point cloud data through three alignment strategies: (1) Direct alignment that immediately connects point features with language embeddings, (2) Step-by-step alignment that follows sequential stages to bridge modalities, and (3) Task-specific alignment customized for particular spatial reasoning requirements. These methods maintain geometric fidelity but face challenges in handling unstructured 3D data. Hybrid modality-based spatial reasoning combines multiple data streams through either tightly or loosely coupled architectures. Tightly coupled approaches integrate modalities through shared embeddings or end-to-end training, while loosely coupled methods maintain modular components with defined interfaces between them. This enables leveraging complementary strengths across modalities but increases architectural complexity.", + "bbox": [ + 506, + 487, + 913, + 847 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "This taxonomy provides a structured framework for understanding the diverse technical approaches in the field while highlighting the distinct challenges and trade-offs each branch", + "bbox": [ + 506, + 845, + 913, + 888 + ], + "page_idx": 1 + }, + { + "type": "image", + "img_path": "images/10f4e27138d77cef1e66632497ab60fcb460eb82533892d1e5d74ab2bb75012d.jpg", + "image_caption": [ + "Figure 2: A Taxonomy of Models for Spatial Reasoning with LLMs: Image-based, Point Cloud-based, and Hybrid Modality-based Approaches and Their Subdivisions." + ], + "image_footnote": [], + "bbox": [ + 93, + 65, + 903, + 321 + ], + "page_idx": 2 + }, + { + "type": "image", + "img_path": "images/f869e9459876e49023165cdcf85439fb6449fd17975fe58ef81ce28fb4e6e702.jpg", + "image_caption": [ + "Figure 3: An overview of image-based approaches." + ], + "image_footnote": [], + "bbox": [ + 86, + 378, + 486, + 579 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "must address. 
Figure 2 presents a detailed breakdown of representative works in each category.", + "bbox": [ + 81, + 628, + 488, + 657 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "3 Recent Advances of Spatial Reasoning in LLM", + "text_level": 1, + "bbox": [ + 83, + 672, + 459, + 705 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "3.1 Image-based Spatial Reasoning", + "text_level": 1, + "bbox": [ + 83, + 714, + 369, + 732 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Image-based spatial reasoning methods can be categorized based on their input modalities: multi-view images, monocular images, RGB-D images, and 3D medical images shown in Figure 3. Each modality offers unique advantages for enhancing 3D understanding in Large Language Models (LLMs). Multi-view images provide spatial data from different perspectives, monocular images extract 3D insights from a single view, RGB-D images incorporate depth information, and 3D medical images address domain-specific challenges in healthcare. These categories highlight the strengths and challenges of each approach in improving spatial reasoning capabilities.", + "bbox": [ + 81, + 734, + 488, + 888 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "3.1.1 Multi-view Images as input", + "text_level": 1, + "bbox": [ + 508, + 380, + 754, + 395 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Several studies explore multi-view images to enhance LLMs' spatial understanding. LLaVA-3D Zhu et al. [2024b] leverages multi-view images and 3D positional embeddings to create 3D Patches, achieving state-of-the-art 3D spatial understanding while maintaining 2D image understanding capabilities. Agent3D-Zero Zhang et al. [2024] utilizes multiple images from different viewpoints, enabling VLMs to perform robust reasoning and understand spatial relationships, achieving zero-shot scene understanding. ShapeLLM Qi et al. [2024a] also uses multi-view image input, with robustness to occlusions. 
Scene-LLM Fu et al. [2024] uses multi-view images to build 3D feature representations, incorporating scene-level and egocentric 3D information to support interactive planning. SpatialPIN Ma et al. [2024a] enhances VLM's spatial reasoning by decomposing, understanding and reconstructing explicit 3D representations from multi-view images and generalizes to various 3D tasks. LLMI3D Yang et al. [2024] extracts spatially enhanced local features from high-resolution images using CNNs and a depth predictor and uses ViT to obtain tokens from low-resolution images. It employs a spatially enhanced cross-branch attention mechanism to effectively mine spatial local features of objects and uses geometric projection to handle. Extracting multi-view features results in huge computational overhead and ignores the essential geometry and depth information. Additionally, plain texts often lead to ambiguities especially in cluttered and complex 3D environmentsChen et al. [2024c]. ConceptGraphs Gu et al. [2024] proposes a graph-structured representation for 3D scenes that operates with an open vocabulary, which is developed by utilizing 2D foundation models and integrating their outputs into a 3D format through multiview association.", + "bbox": [ + 506, + 396, + 915, + 825 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "3.1.2 Monocular Image as input", + "text_level": 1, + "bbox": [ + 508, + 830, + 748, + 845 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "LLMI3D Yang et al. [2024] uses a single 2D image for 3D perception, enhancing performance through spatial local feature mining, 3D query token decoding, and geometry-based", + "bbox": [ + 506, + 845, + 913, + 888 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "3D reasoning. It uses a depth predictor and CNN to extract spatial local features and uses learnable 3D query tokens for geometric coordinate regression. 
It combines black-box networks and white-box projection to address changes in camera focal lengths.", + "bbox": [ + 81, + 68, + 486, + 138 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "3.1.3 RGB-D Image as Input", + "text_level": 1, + "bbox": [ + 83, + 145, + 299, + 160 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Depth is estimated in SpatialPIN Ma et al. [2024a] by ZoeDepth when finding field of view (FOV) through perspective fields and provided for 3D-scene understanding and reconstruction. M3D-LaMed Bai et al. [2024] pre-trains the 3D medical vision encoder with medical image slices along depth and introduces end-to-end tuning to integrate 3D information into LLM.", + "bbox": [ + 81, + 161, + 486, + 257 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "3.1.4 3D Medical Image as input", + "text_level": 1, + "bbox": [ + 83, + 265, + 325, + 280 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Unlike previous research focused on 2D medical images, integrating multi-modal other information such as textual descriptions, M3D-LaMed Bai et al. [2024] is specifically designed for 3D CT images by analyzing spatial features. It demonstrates excellent performance across multiple tasks, including image-text retrieval, report generation, visual question answering, localization, and segmentation. In order to generate radiology reports automatically, a brand-new framework Liu et al. [2024a] is proposed to employs low-resolution (LR) visual tokens as queries to extract information from high-resolution (HR) tokens, ensuring that detailed information is retained across HR volumes while minimizing computational costs by processing only the HR-informed LR visual queries. 3D-CT-GPT Chen et al. [2024b], based medical visual language model, is tailored for the generation of radiology reports from 3D CT scans, with a focus on chest CTs. OpenMEDLab Wang et al. 
[2024] comprises and publishes a variety of medical foundation models to process multi-modal medical data including Color Fundus Photography (CFP), Optical Coherence Tomography (OCT), endoscopy videos, CT&MR volumes and other pathology images.", + "bbox": [ + 81, + 280, + 488, + 571 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "3.1.5 Discussion", + "text_level": 1, + "bbox": [ + 83, + 579, + 210, + 592 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Image-based spatial reasoning methods offer significant advantages, such as easy data acquisition and integration with pre-trained 2D models. Multi-view images provide rich spatial information, while depth estimation enhances scene understanding. However, challenges remain, including limited depth from single views, scale uncertainty, occlusion, and viewpoint dependency. These methods also face issues with visual hallucinations, generalization to novel scenes, and high computational costs. Future research should focus on improving multi-view integration and depth estimation to address these limitations.", + "bbox": [ + 81, + 594, + 486, + 746 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "3.2 Recent Advances of Point Cloud-based Spatial Reasoning", + "text_level": 1, + "bbox": [ + 83, + 757, + 482, + 787 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "As shown in Figure 4, point cloud-based spatial reasoning has advanced significantly in recent years, employing three main alignment methods: Direct, Step-by-step, and Task-specific Alignment. These methods are essential for integrating point cloud data with language models to enable effective spatial reasoning. 
Direct Alignment establishes immediate connections between point cloud features and language model em", + "bbox": [ + 81, + 790, + 486, + 888 + ], + "page_idx": 3 + }, + { + "type": "image", + "img_path": "images/01a7846f1deba904180c76b95b69058c7981b82fbb85fd782f363da73b4c3476.jpg", + "image_caption": [ + "Figure 4: An overview of point cloud-based approaches." + ], + "image_footnote": [], + "bbox": [ + 517, + 66, + 905, + 244 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "beddings, while Step-by-step Alignment follows a sequential process through multiple stages. Task-specific Alignment is customized for particular spatial reasoning requirements. The choice of method depends on specific application needs and constraints.", + "bbox": [ + 506, + 290, + 911, + 359 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "3.2.1 Direct Alignment", + "text_level": 1, + "bbox": [ + 508, + 367, + 684, + 381 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Direct alignment methods create direct connections between point cloud data and language models. PointCLIP [Zhang et al., 2022] was a pioneer, projecting point clouds into multiview depth maps and using CLIP's pre-trained visual encoder for feature extraction, which was then aligned with textual features through a hand-crafted template. This approach showed promising results in zero-shot and few-shot classification tasks by transferring 2D knowledge to the 3D domain. PointCLIP V2 [Zhu et al., 2023] improved the projection quality with a realistic projection module and used GPT-3 for generating 3D-specific text descriptions, achieving better performance in zero-shot classification, part segmentation, and object detection. Chat-Scene [Huang et al., 2024] introduced object identifiers to facilitate object referencing during user-assistant interactions, representing scenes through object-centric embeddings. 
PointLLM [Xu et al., 2025] advanced the field by integrating a point cloud encoder with a powerful LLM, effectively fusing geometric, appearance, and linguistic information, and overcoming data scarcity with automated generation. These methods demonstrate the potential for effective 3D point cloud understanding through language models, enabling improved spatial reasoning and human-AI interaction.", + "bbox": [ + 506, + 382, + 913, + 686 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "3.2.2 Step-by-step Alignment", + "text_level": 1, + "bbox": [ + 508, + 693, + 728, + 707 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Step-by-step alignment has gained popularity in integrating point cloud features with language models. Notable approaches include GPT4Point [Qi et al., 2024b], which uses a Bert-based Point-QFormer for point-text feature alignment, followed by object generation. Grounded 3D-LLMs [Chen et al., 2024d] first aligns 3D scene embeddings with textual descriptions via contrastive pre-training, then fine-tunes with referent tokens. LiDAR-LLMs [Yang et al., 2023] employ a three-stage process: cross-modal alignment, object-centric learning, and high-level instruction fine-tuning. MiniGPT-3D [Tang et al., 2024a] follows a four-stage strategy, from point cloud projection to advanced model enhancements using Mixture of Query Experts. GreenPLM [Tang et al., 2024b] uses", + "bbox": [ + 506, + 708, + 913, + 888 + ], + "page_idx": 3 + }, + { + "type": "image", + "img_path": "images/6f283c8ccff38019b629c7d3baf89d8f01eae9a6757bd3db846940b8dbae1d64.jpg", + "image_caption": [ + "Figure 5: An overview of hybrid modality-based approaches." + ], + "image_footnote": [], + "bbox": [ + 86, + 66, + 480, + 200 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "a three-stage method that aligns a text encoder with an LLM using large text data, followed by point-LLM alignment with 3D data. 
These step-by-step approaches highlight the gradual improvement of spatial reasoning in 3D contexts, offering valuable insights for future research.", + "bbox": [ + 81, + 244, + 488, + 315 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "3.2.3 Task-specific Alignment", + "text_level": 1, + "bbox": [ + 83, + 320, + 303, + 334 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Task-specific alignment customizes models for specific spatial reasoning tasks to improve performance and generalization. SceneVerse [Jia et al., 2024] introduces a large 3D vision-language dataset and Grounded Pre-training for Scenes (GPS), using multi-level contrastive alignment for unified scene-text alignment, achieving state-of-the-art results in tasks like 3D visual grounding and question answering. LL3DA [Chen et al., 2024c] presents a dialogue system that integrates textual instructions and visual interactions, excelling in complex 3D environments. Chat-3D [Wang et al., 2023] proposes a three-stage training scheme to align 3D scene representations with language models, capturing spatial relations with limited data. VisProg [Yuan et al., 2024] introduces visual programming for zero-shot open-vocabulary 3D grounding, leveraging LLMs to generate and execute programmatic representations. These task-specific approaches highlight the importance of adapting models to complex spatial relationships, enabling robust performance even with limited data or zero-shot tasks.", + "bbox": [ + 81, + 334, + 488, + 583 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "3.2.4 Discussion", + "text_level": 1, + "bbox": [ + 83, + 590, + 210, + 603 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "The three alignment approaches—Direct, Step-by-step, and Task-specific—each have distinct strengths and challenges. Direct alignment offers efficiency and quick results but struggles with complex spatial relationships. 
Step-by-step alignment improves feature integration at the cost of higher computational resources and training time. Task-specific alignment excels in specialized tasks but may lack broader applicability.", + "bbox": [ + 81, + 604, + 488, + 704 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "3.3 Hybrid Modality-based Spatial Reasoning", + "text_level": 1, + "bbox": [ + 83, + 710, + 449, + 726 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Hybrid modality-based spatial reasoning integrates point clouds, images, and LLMs through Tightly Coupled and Loosely Coupled approaches, as shown in Figure 5. The Tightly Coupled approach fosters close integration, enabling seamless interaction and high performance, while the Loosely Coupled approach promotes modularity, allowing independent operation of components for greater scalability and flexibility at the cost of reduced real-time interaction.", + "bbox": [ + 81, + 728, + 486, + 839 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "3.3.1 Tightly Coupled", + "text_level": 1, + "bbox": [ + 83, + 845, + 251, + 859 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Several recent works have explored tightly integrated approaches for spatial reasoning across point clouds, images and", + "bbox": [ + 81, + 859, + 488, + 888 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "language modalities: Point-Bind [Guo et al., 2023] proposes a joint embedding space to align point clouds with images and text through contrastive learning. It leverages ImageBind to construct unified representations that enable tasks like zero-shot classification, open-world understanding and multi-modal generation. The tight coupling allows Point-Bind to reason about point clouds using both visual and linguistic cues. JM3D [Ji et al., 2024] introduces a Structured Multimodal Organizer that tightly fuses multi-view images and hierarchical text trees with point clouds. 
This coupled architecture enables detailed spatial understanding by leveraging complementary information across modalities. The Joint Multi-modal Alignment further enhances the synergistic relationships between visual and linguistic features. Uni3D [Zhou et al., 2023] employs a unified transformer architecture that directly aligns point cloud features with image-text representations. By tightly coupling the modalities through end-to-end training, it achieves strong performance on tasks like zero-shot classification and open-world understanding. The shared backbone enables efficient scaling to billion-parameter models. Uni3D-LLM [Liu et al., 2024b] extends this tight coupling to LLMs through an LLM-to-Generator mapping block. This enables unified perception, generation and editing of point clouds guided by natural language. The tight integration allows leveraging rich semantic knowledge from LLMs while maintaining high-quality 3D understanding.", + "bbox": [ + 506, + 68, + 915, + 430 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "3.3.2 Loosely Coupled", + "text_level": 1, + "bbox": [ + 509, + 435, + 681, + 450 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Loosely coupled approaches maintain greater independence between different modalities while still enabling interaction through well-defined interfaces. MultiPLY [Hong et al., 2024] proposes a multisensory embodied LLM that handles multiple input modalities (visual, audio, tactile, thermal) through separate encoders. The modalities are processed independently and communicate through action tokens and state tokens. This decoupled design allows the system to process each modality with specialized encoders optimized for that data type, while enabling scalability and modularity in the system architecture. Similarly, UniPoint-LLM [Liu et al.] introduces a Multimodal Universal Token Space (MUTS) that loosely connects point clouds and images through independent encoders and a shared mapping layer. 
This modular design allows easy integration of new modalities and simplified training by only requiring alignment between new modalities and text, rather than pairwise alignment between all modalities. The main benefits of loosely coupled architectures include greater modularity and flexibility in system design, easier integration of new modalities, and independent scaling of different components. However, this approach may result in less optimal joint representation learning, reduced real-time interaction capabilities, and potential information loss between modalities compared to tightly coupled approaches.", + "bbox": [ + 506, + 450, + 913, + 784 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "3.3.3 Discussion", + "text_level": 1, + "bbox": [ + 509, + 790, + 637, + 803 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "The choice between tightly and loosely coupled approaches presents important tradeoffs in multimodal spatial reasoning systems. Tightly coupled approaches like Point-Bind and JM3D offer stronger joint representation learning and real-time interaction capabilities through end-to-end training and shared feature spaces. This makes them particularly suitable", + "bbox": [ + 506, + 805, + 913, + 888 + ], + "page_idx": 4 + }, + { + "type": "table", + "img_path": "images/d940e65bdb4924227a21274aee6d75d1ec4fbdde1933224aff9d92e6f49a75fb.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
ModelData SourceAlignment TypePre-trainingFine-tuningTaskCode
Image - basedLLaVA-3D [Zhu et al., 2024b]Multi-view Images-3D VQA, 3D Scene Understandingcode
Agent3D-Zero [Zhang et al., 2024]Multi-view Images-3D VQA, 3D Semantic Segmentation
ShapeLLM [Qi et al., 2024a]Multi-view Images-3D Object Classification, 3D Scene Captioningcode
Scene-LLM [Fu et al., 2024]Multi-view Images-3D VQA, Dense Captioning
SpatialPIN [Ma et al., 2024a]RGB-D Images-3D Motion Planning, Task Video Generation
LLMI3D [Yang et al., 2024]Monocular Images-3D Grounding, 3D VQA
Spatialvlm [Chen et al., 2024a]Monocular Images-Dense Reward Annotator, Spatial Data Generationcode
M3D-LaMed [Bai et al., 2024]Medical Images-3D VQA, 3D VLPcode
HILT [Liu et al., 2024a]Medical Images-3DHRG
3D-CT-GPT [Chen et al., 2024b]Medical Images-Radiology Report Generation, 3D VQA
OpenMEDLab [Wang et al., 2024]Medical Images-Medical Imagingcode
Point Cloud - basedPointLLM [Xu et al., 2025]Point CloudDirect Alignment3D Object Classification, 3D Object Captioningcode
Chat-Scene [Huang et al., 2024]Point CloudDirect Alignment3D Visual Grounding, 3D Scene Captioningcode
PointCLIP [Zhang et al., 2022]Point CloudDirect Alignment3D Point Cloud Classificationcode
PointCLIPv2 [Zhu et al., 2023]Point CloudDirect Alignment3D Point Cloud Classificationcode
GPT4Point [Qi et al., 2024b]Point CloudStep-by-step Alignment3D Object Understandingcode
MiniGPT-3D [Tang et al., 2024a]Point CloudStep-by-step Alignment3D Object Classification, 3D Object Captioningcode
GreenPLM [Tang et al., 2024b]Point CloudStep-by-step Alignment3D Object Classificationcode
Grounded 3D-LLM [Chen et al., 2024d]Point CloudStep-by-step Alignment3D Object Detection, 3D VQAcode
Lidar-LLM [Yang et al., 2023]Point CloudStep-by-step Alignment3D Captioning, 3D Groundingcode
3D-LLaVA [Deng et al., 2025]Point CloudTask-specific Alignment3D VQA, 3D Captioningcode
ScanReason [Zhu et al., 2024a]Point CloudTask-specific Alignment3D Reasoning Groundingcode
SegPoint [He et al., 2024]Point CloudTask-specific Alignment3D Instruction Segmentation
Kestrel [Fei et al., 2024]Point CloudTask-specific AlignmentPart-Aware Point Grounding
SIG3D [Man et al., 2024]Point CloudTask-specific AlignmentSituation Estimationcode
Chat-3D [Wang et al., 2023]Point CloudTask-specific Alignment3D VQAcode
LL3DA [Chen et al., 2024c]Point CloudTask-specific Alignment3D Dense Captioningcode
Hybrid - basedPoint-bind [Guo et al., 2023]Point cloud, ImageTightly Coupled3D Cross-modal Retrieval, Any-to-3D Generationcode
JM3D [Ji et al., 2024]Point cloud, ImageTightly CoupledImage-3D Retrieval, 3D Part Segmentationcode
Uni3D [Zhou et al., 2023]Point cloud, ImageTightly CoupledZero-shot Shape Classificationcode
Uni3D-LLM [Liu et al., 2024b]Point cloud, ImageTightly Coupled3D VQA
MultiPLY [Hong et al., 2024]Point cloud, ImageLoosely CoupledObject retrievalcode
UniPoint-LLM [Liu et al.]Point cloud, ImageLoosely Coupled3D generation, 3D VQA
", + "bbox": [ + 86, + 65, + 911, + 395 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Table 1: Taxonomy of Large Language Models with spatial reasoning capability. This table presents a comprehensive comparison of various 3D vision-language models categorized by their input modalities (image-based, point cloud-based, and hybrid-based), showing their data sources, alignment types, training strategies (pre-training and fine-tuning), primary tasks, and code availability. The models are organized into three main categories based on their input type: image-based models, point cloud-based models, and hybrid models that utilize both modalities.", + "bbox": [ + 81, + 404, + 913, + 458 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "for applications requiring detailed spatial understanding and precise control. However, they can be more complex to train and scale, and adding new modalities may require significant architectural changes. In contrast, loosely coupled approaches like MultiPLY and UniPoint-LLM provide greater modularity and flexibility, making them easier to extend and maintain. They allow independent optimization of different components and simplified training procedures, but may sacrifice some performance in tasks requiring fine-grained cross-modal understanding. The optimal choice ultimately depends on specific application requirements - tightly coupled architectures may be preferred for specialized high-performance systems, while loosely coupled designs better suit general-purpose platforms prioritizing extensibility and maintainability. 
Future work may explore hybrid approaches that combine the benefits of both paradigms, potentially using adaptive coupling mechanisms that adjust based on task demands.", + "bbox": [ + 81, + 479, + 486, + 717 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "4 Applications", + "text_level": 1, + "bbox": [ + 83, + 734, + 223, + 752 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "A key research focus leverages LLMs to enhance robotic embodied intelligence, enabling machines to interpret natural language commands for real-world tasks. This includes robotic control, navigation, and manipulation, where LLMs parse instructions, generate action plans, and adapt to dynamic environments—for instance, guiding robots to locate objects in cluttered spaces using text-based prompts.", + "bbox": [ + 81, + 757, + 486, + 856 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "3D Scene Understanding. Advanced 3D scene analysis integrates multimodal data (e.g., images, point clouds, text) for", + "bbox": [ + 83, + 859, + 488, + 891 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "tasks like open-vocabulary segmentation, semantic mapping, and spatial reasoning. Central to this is 3D visual question answering (3D-VQA), requiring models to interpret queries about object attributes, spatial relationships, or contextual roles within scenes. Context-aware systems further account for user perspectives to deliver precise responses.", + "bbox": [ + 506, + 479, + 913, + 566 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Cross-Domain Applications. In healthcare, LLMs analyze volumetric medical scans (e.g., CT) for lesion detection and automated diagnostics. Autonomous driving systems utilize 3D-capable LLMs to interpret traffic scenes, aiding object detection [Zha et al., 2023, 2024] and path planning. 
Design-oriented applications include generating indoor layouts from textual requirements, while educational tools employ interactive 3D environments to teach spatial concepts.", + "bbox": [ + 506, + 571, + 913, + 686 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "5 Challenges and Future Directions", + "text_level": 1, + "bbox": [ + 509, + 715, + 823, + 732 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Table 1 summarizes the models that leverage LLMs to assist graph-related tasks according to the proposed taxonomy. Based on the above review and analysis, we believe that there is still much space for further enhancement in this field. Recent advances in integrating LLMs with three-dimensional (3D) data have demonstrated considerable promise. However, numerous challenges must still be overcome to realize robust and practical 3D-aware LLMs. Below, we summarize these obstacles and then outline potential pathways to address them, highlighting key research directions for the future.", + "bbox": [ + 506, + 750, + 913, + 890 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "5.1 Challenges", + "text_level": 1, + "bbox": [ + 84, + 68, + 210, + 83 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Weak Spatial Reasoning and Representation. Multimodal LLMs (MLLMs) exhibit limited acuity in 3D spatial understanding, struggling with fine-grained relationships (e.g., front/back distinctions, occluded object localization) and precise geometric outputs (distances, angles). These issues stem partly from mismatches between unstructured point clouds and sequence-based LLM architectures, where high-dimensional 3D data incur prohibitive token counts or oversimplified encodings.", + "bbox": [ + 86, + 95, + 485, + 219 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Data and Evaluation Gaps. Progress in 3D-aware LLMs is hindered by the scarcity of high-quality 3D-text paired datasets. 
Unlike the abundant resources for 2D images and video, the 3D domain lacks standardized, richly annotated datasets crucial for training robust models. Existing benchmarks focus mainly on discriminative tasks like classification and retrieval—emphasizing category differentiation rather than generating rich, descriptive 3D scene outputs. Consequently, evaluations often rely on subjective metrics (e.g., human or GPT-based judgments) that can lack consistency. Advancing the field requires developing objective, comprehensive benchmarks that assess both open-vocabulary generation and the spatial plausibility of descriptions relative to the underlying 3D structure.", + "bbox": [ + 86, + 224, + 485, + 419 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Multimodal Integration and Generalization. Fusing 3D data (e.g., point clouds) with other modalities like 2D imagery, audio, or text poses significant challenges due to their distinct structural characteristics. The conversion and alignment of high-dimensional 3D data with lower-dimensional representations can lead to a loss of intricate details, diluting the original 3D richness. Moreover, current models often struggle with open-vocabulary recognition, limiting their ability to identify or describe objects outside of their training data—especially when encountering unseen scenes or novel objects. This difficulty is further compounded by the variability of natural language, from colloquial expressions to domain-specific terminology, and by noisy inputs. Thus, more sophisticated multimodal integration techniques and generalization strategies are needed to preserve geometric fidelity while accommodating diverse, unpredictable inputs.", + "bbox": [ + 86, + 425, + 485, + 647 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Complex Task Definition. While 3D-aware LLMs excel in controlled settings, they lack frameworks for nuanced language-context inference in dynamic environments. 
Task decomposition and scalable encoding methods are needed to balance geometric fidelity with computational tractability, particularly for interactive applications requiring real-time spatial reasoning.", + "bbox": [ + 86, + 652, + 485, + 750 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "5.2 Future Directions", + "text_level": 1, + "bbox": [ + 86, + 763, + 261, + 777 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Enhancing 3D Perception and Representations. Addressing spatial reasoning gaps requires richer 3D-text datasets (e.g., from robotics, gaming, autonomous driving) and model architectures that encode geometric relationships. Multi-view data and robust depth cues can improve orientation, distance, and occlusion estimation. Compact 3D tokens and refined encoding/decoding methods may bridge unstructured point", + "bbox": [ + 86, + 791, + 485, + 888 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "clouds with sequence-based models, enabling fine-grained spatial understanding and generation.", + "bbox": [ + 513, + 69, + 911, + 95 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Multi-Modal Fusion and Instruction Understanding. Tighter integration of modalities (point clouds, images, text, audio) via unified latent spaces or attention mechanisms could preserve subtle geometric and semantic details. Enhanced instruction processing—including hierarchical task decomposition, contextual interpretation, and robustness to dialects/terminology—would improve compositional reasoning in 3D environments and broaden real-world applicability. Furthermore, by leveraging these integrated representations, models can more adeptly adapt to complex instructions and novel scenarios, ultimately paving the way for more robust and versatile 3D reasoning systems.", + "bbox": [ + 513, + 101, + 911, + 268 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Cross-Scene Generalization and Robust Evaluation. 
Open-vocabulary 3D understanding demands large-scale pretraining on diverse scenes and transfer/lifelong learning paradigms for adapting to novel objects or environments. This understanding extends beyond predefined categories to generalize to unseen objects and scenes. For instance, models need to comprehend \"an old rocking chair\" even if this specific type of chair never appeared in the training data.", + "bbox": [ + 513, + 273, + 911, + 383 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Expanding Applications for Autonomous Systems. 3D-aware LLMs hold potential in robotics (navigation, manipulation), medical imaging (lesion detection), architectural design, and interactive education. Future systems may integrate environmental constraints, user perspectives, and object affordances for autonomous planning and decision-making in dynamic 3D contexts.", + "bbox": [ + 513, + 388, + 911, + 486 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Collectively, these challenges and potential directions underscore the field's rapid evolution and its equally significant open questions. Moving forward, more robust 3D-specific data resources, better model architectures, and more refined evaluation protocols will be essential to unlock the full potential of LLMs in three-dimensional settings—and ultimately bring intelligent, multimodal understanding closer to real-world deployment.", + "bbox": [ + 513, + 487, + 911, + 598 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "6 Conclusion", + "text_level": 1, + "bbox": [ + 513, + 616, + 635, + 631 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "The integration of LLMs with 3D data is a dynamic research area. This survey categorized 3D-LLM research into image-based, point cloud-based, and hybrid modality-based spatial reasoning. It reviewed state-of-the-art methods, their applications in multiple fields, and associated challenges. 
Notably, image-based methods have data-related advantages but face issues like depth information shortage. Point cloud-based methods offer precise 3D details but encounter data-handling difficulties. Hybrid methods combine strengths yet struggle with data alignment. Applications are diverse, but challenges such as weak spatial perception, data scarcity, and evaluation problems exist. Future research should focus on enhancing 3D perception, improving multi-modal fusion, expanding generalization, developing evaluation metrics, enhancing instruction understanding, optimizing 3D representations, and exploring continuous learning. By addressing these, we can unlock the full potential of 3D-aware LLMs for real-world deployment and industry advancement.", + "bbox": [ + 513, + 638, + 911, + 888 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "References", + "text_level": 1, + "bbox": [ + 84, + 66, + 179, + 82 + ], + "page_idx": 7 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "Fan Bai, Yuxin Du, Tiejun Huang, Max Q-H Meng, and Bo Zhao. M3d: Advancing 3d medical image analysis with multi-modal large language models. arXiv preprint arXiv:2404.00578, 2024.", + "Boyuan Chen, Zhuo Xu, Sean Kirmani, Brain Ichter, Dorsa Sadigh, Leonidas Guibas, and Fei Xia. Spatialvlm: Endowing vision-language models with spatial reasoning capabilities. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 14455-14465, 2024.", + "Hao Chen, Wei Zhao, Yingli Li, Tianyang Zhong, Yisong Wang, Youlan Shang, Lei Guo, Junwei Han, Tianming Liu, Jun Liu, et al. 3d-ct-gpt: Generating 3d radiology reports through integration of large vision-language models. arXiv preprint arXiv:2409.19330, 2024.", + "Sijin Chen, Xin Chen, Chi Zhang, Mingsheng Li, Gang Yu, Hao Fei, Hongyuan Zhu, Jiayuan Fan, and Tao Chen. Ll3da: Visual interactive instruction tuning for omni-3d understanding reasoning and planning. 
In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 26428-26438, 2024.", + "Yilun Chen, Shuai Yang, Haifeng Huang, Tai Wang, Ruiyuan Lyu, Runsen Xu, Dahua Lin, and Jiangmiao Pang. Grounded 3d-llm with referent tokens. arXiv preprint arXiv:2405.10370, 2024.", + "Jiajun Deng, Tianyu He, Li Jiang, Tianyu Wang, Feras Dayoub, and Ian Reid. 3d-llava: Towards generalist 3d lmm's with omni superpoint transformer. arXiv preprint arXiv:2501.01163, 2025.", + "Junjie Fei, Mahmoud Ahmed, Jian Ding, Eslam Mohamed Bakr, and Mohamed Elhoseiny. Kestrel: Point grounding multimodal llm for part-aware 3d vision-language understanding. arXiv preprint arXiv:2405.18937, 2024.", + "Rao Fu, Jingyu Liu, Xilun Chen, Yixin Nie, and Wenhan Xiong. Scene-llm: Extending language model for 3d visual understanding and reasoning. arXiv preprint arXiv:2403.11401, 2024.", + "Chen Gao, Baining Zhao, Weichen Zhang, Jinzhu Mao, Jun Zhang, Zhiheng Zheng, Fanhang Man, Jianjie Fang, Zile Zhou, Jinqiang Cui, et al. Embodiedcity: A benchmark platform for embodied agent in real-world city environment. arXiv preprint arXiv:2410.09604, 2024.", + "Qiao Gu, Ali Kuwajerwala, Sacha Morin, Krishna Murthy Jatavallabhula, Bipasha Sen, Aditya Agarwal, Corban Rivera, William Paul, Kirsty Ellis, Rama Chellappa, et al. Conceptgraphs: Open-vocabulary 3d scene graphs for perception and planning. In 2024 IEEE International Conference on Robotics and Automation (ICRA), pages 5021-5028. IEEE, 2024.", + "Ziyu Guo, Renrui Zhang, Xiangyang Zhu, Yiwen Tang, Xi-anzheng Ma, Jiaming Han, Kexin Chen, Peng Gao, Xi-anzhi Li, Hongsheng Li, et al. Point-bind & point-llm: Aligning point cloud with multi-modality for 3d understanding, generation, and instruction following. arXiv preprint arXiv:2309.00615, 2023." + ], + "bbox": [ + 84, + 85, + 486, + 888 + ], + "page_idx": 7 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "Shuting He, Henghui Ding, Xudong Jiang, and Bihan Wen. 
Segpoint: Segment any point cloud via large language model. In European Conference on Computer Vision, pages 349-367. Springer, 2024.", + "Yining Hong, Haoyu Zhen, Peihao Chen, Shuhong Zheng, Yilun Du, Zhenfang Chen, and Chuang Gan. 3d-llm: Injecting the 3d world into large language models. Advances in Neural Information Processing Systems, 36:20482-20494, 2023.", + "Yining Hong, Zishuo Zheng, Peihao Chen, Yian Wang, Junyan Li, and Chuang Gan. Multiply: A multisensory object-centric embodied large language model in 3d world. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 26406-26416, 2024.", + "Haifeng Huang, Yilun Chen, Zehan Wang, Rongjie Huang, Runsen Xu, Tai Wang, Luping Liu, Xize Cheng, Yang Zhao, Jiangmiao Pang, et al. Chat-scene: Bridging 3d scene and large language models with object identifiers. In The Thirty-eighth Annual Conference on Neural Information Processing Systems, 2024.", + "Jiayi Ji, Haowei Wang, Changli Wu, Yiwei Ma, Xiaoshuai Sun, and Rongrong Ji. Jm3d & jm3d-llm: Elevating 3d representation with joint multi-modal cues. IEEE Transactions on Pattern Analysis and Machine Intelligence, 2024.", + "Baoxiong Jia, Yixin Chen, Huangyue Yu, Yan Wang, Xuesong Niu, Tengyu Liu, Qing Li, and Siyuan Huang. Sceneverse: Scaling 3d vision-language learning for grounded scene understanding. In European Conference on Computer Vision, pages 289-310. Springer, 2024.", + "Jacob Devlin Ming-Wei Chang Kenton and Lee Kristina Toutanova. Bert: Pre-training of deep bidirectional transformers for language understanding. In North American Chapter of the Association for Computational Linguistics: Human Language Technologies, 2019.", + "Junnan Li, Dongxu Li, Caiming Xiong, and Steven Hoi. Blip: Bootstrapping language-image pre-training for unified vision-language understanding and generation. In International conference on machine learning, pages 12888-12900. 
PMLR, 2022.", + "Dingning Liu, Xiaoshui Huang, Zhihui Wang, Zhenfei Yin, Peng Gao, Yujiao Wu, Yuenan Hou, Xinzhu Ma, and Wanli Ouyang. Pointmllm: Aligning multi-modality with llm for point cloud understanding, generation and editing.", + "Che Liu, Zhongwei Wan, Yuqi Wang, Hui Shen, Haozhe Wang, Kangyu Zheng, Mi Zhang, and Rossella Arcucci. Benchmarking and boosting radiology report generation for 3d high-resolution medical images. arXiv preprint arXiv:2406.07146, 2024.", + "Dingning Liu, Xiaoshui Huang, Yuenan Hou, Zhihui Wang, Zhenfei Yin, Yongshun Gong, Peng Gao, and Wanli Ouyang. Uni3d-llm: Unifying point cloud perception, generation and editing with large language models. arXiv preprint arXiv:2402.03327, 2024.", + "Chenyang Ma, Kai Lu, Ta-Ying Cheng, Niki Trigoni, and Andrew Markham. Spatialpin: Enhancing spatial reasoning" + ], + "bbox": [ + 511, + 69, + 911, + 888 + ], + "page_idx": 7 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "capabilities of vision-language models through prompting and interacting 3d priors. arXiv preprint arXiv:2403.13438, 2024.", + "Xianzheng Ma, Yash Bhalgat, Brandon Smart, Shuai Chen, Xinghui Li, Jian Ding, Jindong Gu, Dave Zhenyu Chen, Songyou Peng, Jia-Wang Bian, et al. When llms step into the 3d world: A survey and meta-analysis of 3d tasks via multi-modal large language models. arXiv preprint arXiv:2405.10255, 2024.", + "Yuexin Ma, Tai Wang, Xuyang Bai, Huitong Yang, Yuenan Hou, Yaming Wang, Yu Qiao, Ruigang Yang, and Xinge Zhu. Vision-centric bev perception: A survey. IEEE Transactions on Pattern Analysis and Machine Intelligence, 2024.", + "Yunze Man, Liang-Yan Gui, and Yu-Xiong Wang. Situational awareness matters in 3d vision language reasoning. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 13678-13688, 2024.", + "Zekun Qi, Runpei Dong, Shaochen Zhang, Haoran Geng, Chunrui Han, Zheng Ge, Li Yi, and Kaiheng Ma. 
Shapellm: Universal 3d object understanding for embodied interaction. In European Conference on Computer Vision, pages 214-238. Springer, 2024.", + "Zhangyang Qi, Ye Fang, Zeyi Sun, Xiaoyang Wu, Tong Wu, Jiaqi Wang, Dahua Lin, and Hengshuang Zhao. Gpt4point: A unified framework for point-language understanding and generation. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 26417-26427, 2024.", + "Alec Radford, Jong Wook Kim, Chris Hallacy, Aditya Ramesh, Gabriel Goh, Sandhini Agarwal, Girish Sastry, Amanda Askell, Pamela Mishkin, Jack Clark, et al. Learning transferable visual models from natural language supervision. In International conference on machine learning, pages 8748-8763. PMLR, 2021.", + "Alec Radford. Improving language understanding by generative pre-training. 2018.", + "Yuan Tang, Xu Han, Xianzhi Li, Qiao Yu, Yixue Hao, Long Hu, and Min Chen. Minigpt-3d: Efficiently aligning 3d point clouds with large language models using 2d priors. In Proceedings of the 32nd ACM International Conference on Multimedia, pages 6617-6626, 2024.", + "Yuan Tang, Xu Han, Xianzhi Li, Qiao Yu, Jinfeng Xu, Yixue Hao, Long Hu, and Min Chen. More text, less point: Towards 3d data-efficient point-language understanding. arXiv preprint arXiv:2408.15966, 2024.", + "Zehan Wang, Haifeng Huang, Yang Zhao, Ziang Zhang, and Zhou Zhao. Chat-3d: Data-efficiently tuning large language model for universal dialogue of 3d scenes. arXiv preprint arXiv:2308.08769, 2023.", + "Xiaosong Wang, Xiaofan Zhang, Guotai Wang, Junjun He, Zhongyu Li, Wentao Zhu, Yi Guo, Qi Dou, Xiaoxiao Li, Dequan Wang, et al. Openmedlab: An open-source platform for multi-modality foundation models in medicine. arXiv preprint arXiv:2402.18028, 2024." + ], + "bbox": [ + 84, + 68, + 488, + 888 + ], + "page_idx": 8 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "Runsen Xu, Xiaolong Wang, Tai Wang, Yilun Chen, Jiangmiao Pang, and Dahua Lin. 
Pointllm: Empowering large language models to understand point clouds. In European Conference on Computer Vision, pages 131-147. Springer, 2025.", + "Senqiao Yang, Jiaming Liu, Ray Zhang, Mingjie Pan, Zoey Guo, Xiaqi Li, Zehui Chen, Peng Gao, Yandong Guo, and Shanghang Zhang. Lidar-llm: Exploring the potential of large language models for 3d lidar understanding. arXiv preprint arXiv:2312.14074, 2023.", + "Fan Yang, Sicheng Zhao, Yanhao Zhang, Haoxiang Chen, Hui Chen, Wenbo Tang, Haonan Lu, Pengfei Xu, Zhenyu Yang, Jungong Han, et al. Llmi3d: Empowering llm with 3d perception from a single 2d image. arXiv preprint arXiv:2408.07422, 2024.", + "Zhihao Yuan, Jinke Ren, Chun-Mei Feng, Hengshuang Zhao, Shuguang Cui, and Zhen Li. Visual programming for zero-shot open-vocabulary 3d visual grounding. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 20623-20633, 2024.", + "Jirong Zha, Liang Han, Xiwang Dong, and Zhang Ren. Privacy-preserving push-sum distributed cubature information filter for nonlinear target tracking with switching directed topologies. ISA transactions, 136:16-30, 2023.", + "Jirong Zha, Nan Zhou, Zhenyu Liu, Tao Sun, and Xinlei Chen. Diffusion-based filter for fast and accurate collaborative tracking with low data transmission. Authorea Preprints, 2024.", + "Renrui Zhang, Ziyu Guo, Wei Zhang, Kunchang Li, Xupeng Miao, Bin Cui, Yu Qiao, Peng Gao, and Hongsheng Li. Pointclip: Point cloud understanding by clip. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 8552-8562, 2022.", + "Sha Zhang, Di Huang, Jiajun Deng, Shixiang Tang, Wanli Ouyang, Tong He, and Yanyong Zhang. Agent3d-zero: An agent for zero-shot 3d understanding. In European Conference on Computer Vision, pages 186-202. Springer, 2024.", + "Junsheng Zhou, Jinsheng Wang, Baorui Ma, Yu-Shen Liu, Tiejun Huang, and Xinlong Wang. Uni3d: Exploring unified 3d representation at scale. 
arXiv preprint arXiv:2310.06773, 2023.", + "Xiangyang Zhu, Renrui Zhang, Bowei He, Ziyu Guo, Ziyao Zeng, Zipeng Qin, Shanghang Zhang, and Peng Gao. Pointclip v2: Prompting clip and gpt for powerful 3d open-world learning. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 2639-2650, 2023.", + "Chenming Zhu, Tai Wang, Wenwei Zhang, Kai Chen, and Xihui Liu. Scanreason: Empowering 3d visual grounding with reasoning capabilities. In European Conference on Computer Vision, pages 151-168. Springer, 2024.", + "Chenming Zhu, Tai Wang, Wenwei Zhang, Jiangmiao Pang, and Xihui Liu. Llava-3d: A simple yet effective pathway to empowering lmm with 3d-awareness. arXiv preprint arXiv:2409.18125, 2024." + ], + "bbox": [ + 511, + 68, + 913, + 883 + ], + "page_idx": 8 + } +] \ No newline at end of file diff --git a/data/2025/2504_05xxx/2504.05786/e603c6f4-386e-4380-abf7-2f18915b0ee6_model.json b/data/2025/2504_05xxx/2504.05786/e603c6f4-386e-4380-abf7-2f18915b0ee6_model.json new file mode 100644 index 0000000000000000000000000000000000000000..5ecaa967b60d095a35defe62e076cef75c7a67a5 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05786/e603c6f4-386e-4380-abf7-2f18915b0ee6_model.json @@ -0,0 +1,1681 @@ +[ + [ + { + "type": "aside_text", + "bbox": [ + 0.023, + 0.27, + 0.061, + 0.701 + ], + "angle": 270, + "content": "arXiv:2504.05786v1 [cs.CV] 8 Apr 2025" + }, + { + "type": "title", + "bbox": [ + 0.099, + 0.109, + 0.9, + 0.132 + ], + "angle": 0, + "content": "How to Enable LLM with 3D Capacity? 
A Survey of Spatial Reasoning in LLM" + }, + { + "type": "text", + "bbox": [ + 0.194, + 0.148, + 0.8, + 0.168 + ], + "angle": 0, + "content": "Jirong Zha\\(^{1*}\\), Yuxuan Fan\\(^{2*}\\), Xiao Yang\\(^{2}\\), Chen Gao\\(^{1\\dagger}\\), Xinlei Chen\\(^{1\\dagger}\\)" + }, + { + "type": "text", + "bbox": [ + 0.415, + 0.171, + 0.584, + 0.189 + ], + "angle": 0, + "content": "\\(^{1}\\)Tsinghua University" + }, + { + "type": "text", + "bbox": [ + 0.221, + 0.19, + 0.778, + 0.208 + ], + "angle": 0, + "content": "2The Hong Kong University of Science and Technology (Guang Zhou)" + }, + { + "type": "text", + "bbox": [ + 0.162, + 0.21, + 0.838, + 0.244 + ], + "angle": 0, + "content": "zhajirong23@mails.tsinghua.edu.cn, {yfan546, xyang856}@connect.hkust-gz.edu.cn, chgao96@gmail.com, chen.xinlei@sz.tsinghua.edu.cn" + }, + { + "type": "title", + "bbox": [ + 0.246, + 0.272, + 0.325, + 0.289 + ], + "angle": 0, + "content": "Abstract" + }, + { + "type": "text", + "bbox": [ + 0.115, + 0.299, + 0.457, + 0.633 + ], + "angle": 0, + "content": "3D spatial understanding is essential in real-world applications such as robotics, autonomous vehicles, virtual reality, and medical imaging. Recently, Large Language Models (LLMs), having demonstrated remarkable success across various domains, have been leveraged to enhance 3D understanding tasks, showing potential to surpass traditional computer vision methods. In this survey, we present a comprehensive review of methods integrating LLMs with 3D spatial understanding. We propose a taxonomy that categorizes existing methods into three branches: image-based methods deriving 3D understanding from 2D visual data, point cloud-based methods working directly with 3D representations, and hybrid modality-based methods combining multiple data streams. We systematically review representative methods along these categories, covering data representations, architectural modifications, and training strategies that bridge textual and 3D modalities. 
Finally, we discuss current limitations, including dataset scarcity and computational challenges, while highlighting promising research directions in spatial perception, multi-modal fusion, and real-world applications." + }, + { + "type": "title", + "bbox": [ + 0.084, + 0.658, + 0.225, + 0.673 + ], + "angle": 0, + "content": "1 Introduction" + }, + { + "type": "text", + "bbox": [ + 0.083, + 0.681, + 0.49, + 0.89 + ], + "angle": 0, + "content": "Large Language Models (LLMs) have evolved from basic neural networks to advanced transformer models like BERT [Kenton and Toutanova, 2019] and GPT [Radford, 2018], originally excelling at language tasks by learning from vast text datasets. Recent advancements, however, have extended these models beyond pure linguistic processing to encompass multimodal ability (In this paper, when we refer to LLMs, we specifically mean those that integrate multimodal functions). Their ability to capture complex patterns and relationships [Chen et al., 2024a] now holds promise for spatial reasoning tasks [Ma et al., 2024b]. By applying these enhanced models to challenges such as understanding 3D object relationships and spatial navigation, we open up new opportunities for advancing fields like robotics, computer vision, and augmented reality [Gao et al., 2024]." + }, + { + "type": "image", + "bbox": [ + 0.516, + 0.272, + 0.909, + 0.412 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.509, + 0.424, + 0.915, + 0.476 + ], + "angle": 0, + "content": "Figure 1: Large Language Models can acquire 3D spatial reasoning capabilities through various input sources including multi-view images, RGB-D images, point clouds, and hybrid modalities, enabling the processing and understanding of three-dimensional information." 
+ }, + { + "type": "text", + "bbox": [ + 0.508, + 0.506, + 0.914, + 0.659 + ], + "angle": 0, + "content": "At the same time, 3D data and 3D modeling techniques have seen significant developments [Ma et al., 2024c], finding extensive applications in virtual and augmented reality, robotics, autonomous vehicles, gaming, medical imaging, and more. Unlike traditional two-dimensional images, 3D data provides a richer view of objects and environments, capturing essential spatial relationships and geometry. Such information is critical for tasks like scene reconstruction, object manipulation, and autonomous navigation, where merely text-based descriptions or 2D representations may fall short of conveying the necessary depth or spatial context." + }, + { + "type": "text", + "bbox": [ + 0.508, + 0.667, + 0.916, + 0.892 + ], + "angle": 0, + "content": "LLMs help Spatial Understanding. Bringing these two fields together—powerful language understanding from LLMs and the spatial realism of 3D data—offers the potential for highly capable, context-aware systems. From a linguistic perspective, real-world descriptions often reference physical arrangement, orientation, or manipulations of objects in space. Text alone can be imprecise or ambiguous about size, shape, or relative positioning unless one can integrate a robust spatial or visual understanding. Consequently, there is growing interest in enhancing LLMs with a \"3D capacity\" that enables them to interpret, reason, and even generate three-dimensional representations based on natural language prompts. 
Such an integrated approach opens up exciting prospects: robots that can follow language instructions more effectively by grounding their commands in 3D context, architects who quickly prototype 3D layouts from textual descriptions, game design" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.083, + 0.069, + 0.488, + 0.111 + ], + "angle": 0, + "content": "ers who generate immersive environments for narrative-based experiences, and many other creative applications yet to be envisioned." + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.114, + 0.487, + 0.337 + ], + "angle": 0, + "content": "Motivation. Although LLMs have been increasingly applied in 3D-related tasks, and Ma et al. [2024b] provided a systematic overview of this field, the rapid advancement of this domain has led to numerous new developments in recent months, necessitating an up-to-date survey that captures these recent breakthroughs. Integrating 3D capacity into LLMs faces several key challenges: (1) the scarcity of high-quality 3D datasets compared to abundant text corpora; (2) the fundamental mismatch between sequential text data and continuous 3D spatial structures, requiring specialized architectural adaptations; and (3) the intensive computational requirements for processing 3D data at scale. While early attempts at combining language and 3D have shown promise, current approaches often remain limited in scope, scalability, and generalization capability. Most existing solutions are domain-specific and lack the broad applicability characteristic of text-based LLMs." + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.34, + 0.487, + 0.549 + ], + "angle": 0, + "content": "Contribution. The contributions of this work are summarized in the following three aspects: (1) A structured taxonomy. We provide a timely and comprehensive survey that distinguishes itself from the systematic overview offered by Ma et al. 
[2024b] by presenting a novel perspective on LLM applications in 3D-related tasks: our work constructs a structured taxonomy that categorizes existing research into three primary groups (Figure 2) and offers a forward-looking analysis of the latest breakthroughs, thereby underscoring our unique contributions and the significance of our approach in advancing the field. (2) A comprehensive review. Building on the proposed taxonomy, we systematically review the current research progress on LLMs for spatial reasoning tasks. (3) Future directions. We highlight the remaining limitations of existing works and suggest potential directions for future research." + }, + { + "type": "title", + "bbox": [ + 0.084, + 0.562, + 0.22, + 0.579 + ], + "angle": 0, + "content": "2 Preliminary" + }, + { + "type": "title", + "bbox": [ + 0.084, + 0.582, + 0.315, + 0.597 + ], + "angle": 0, + "content": "2.1 Large Language Models" + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.599, + 0.487, + 0.71 + ], + "angle": 0, + "content": "Large Language Models (LLMs) have evolved from early word embeddings to context-aware models like BERT [Kenton and Toutanova, 2019]. Generative transformers such as GPT series [Radford, 2018], have further advanced text generation and few-shot learning. However, these models often struggle with spatial reasoning due to their focus on textual patterns, prompting efforts to integrate external spatial knowledge [Fu et al., 2024]." + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.71, + 0.487, + 0.837 + ], + "angle": 0, + "content": "Vision-Language Models (VLMs) extend LLMs by aligning visual data with text. Early examples like CLIP [Radford et al., 2021] leverage co-attentional architectures and contrastive learning, while later models such as BLIP [Li et al., 2022] refine these techniques with larger datasets. Yet, most VLMs process only 2D data, limiting their ability to capture detailed 3D spatial configurations. 
Integrating 3D context via depth maps, point clouds, or voxels remains challenging, motivating ongoing research toward more robust spatial intelligence." + }, + { + "type": "title", + "bbox": [ + 0.084, + 0.844, + 0.277, + 0.858 + ], + "angle": 0, + "content": "2.2 3D Data Structures" + }, + { + "type": "text", + "bbox": [ + 0.084, + 0.861, + 0.488, + 0.89 + ], + "angle": 0, + "content": "3D data has different structures, which are essential for understanding the three-dimensional world, and common methods" + }, + { + "type": "text", + "bbox": [ + 0.508, + 0.069, + 0.915, + 0.125 + ], + "angle": 0, + "content": "include point clouds, voxel grids, polygonal meshes, neural fields, hybrid representations, and 3D Gaussian splatting. Point clouds represent shapes using discrete points, typically denoted as" + }, + { + "type": "equation", + "bbox": [ + 0.606, + 0.13, + 0.816, + 0.149 + ], + "angle": 0, + "content": "\\[\nP = \\left\\{p _ {i} \\in \\mathbb {R} ^ {3} \\mid i = 1, \\dots , N \\right\\},\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.509, + 0.153, + 0.913, + 0.293 + ], + "angle": 0, + "content": "which are storage-efficient but lack surface topology. Voxel grids partition space into uniform cubes, with each voxel \\(V(i,j,k)\\) storing occupancy or distance values, providing detailed structure at the expense of increased memory usage at higher resolutions. Polygonal meshes compactly encode complex geometries through a set of vertices \\(\\{v_{i}\\}\\) and faces \\(\\{F_j\\}\\), though their unstructured and non-differentiable nature poses challenges for integration with neural networks. 
Neural fields offer an implicit approach by modeling 3D shapes as continuous and differentiable functions, such as" + }, + { + "type": "equation", + "bbox": [ + 0.651, + 0.298, + 0.771, + 0.315 + ], + "angle": 0, + "content": "\\[\nf _ {\\theta}: \\mathbb {R} ^ {3} \\rightarrow (c, \\sigma),\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.508, + 0.32, + 0.915, + 0.46 + ], + "angle": 0, + "content": "which maps spatial coordinates to color \\(c\\) and density \\(\\sigma\\). Hybrid representations combine these neural fields with traditional volumetric methods (e.g., integrating \\(f_{\\theta}\\) with voxel grids) to achieve high-quality, real-time rendering. Meanwhile, 3D Gaussian splatting enhances point clouds by associating each point \\(p_i\\) with a covariance matrix \\(\\Sigma_i\\) and color \\(c_i\\), efficiently encoding radiance information for rendering. Each method has its unique strengths and trade-offs, making them suitable for different applications in 3D understanding and generation." + }, + { + "type": "title", + "bbox": [ + 0.509, + 0.469, + 0.707, + 0.485 + ], + "angle": 0, + "content": "2.3 Proposed taxonomy" + }, + { + "type": "text", + "bbox": [ + 0.508, + 0.488, + 0.915, + 0.848 + ], + "angle": 0, + "content": "We propose a taxonomy that classifies 3D-LLM research into three main categories based on input modalities and integration strategies, as shown in Figure 1: Image-based spatial reasoning encompasses approaches that derive 3D understanding from 2D images. This includes multi-view methods that reconstruct 3D scenes, RGB-D images providing explicit depth information, monocular 3D perception inferring depth from single views, and medical imaging applications. While these approaches benefit from readily available image data and existing vision models, they may struggle with occlusions and viewpoint limitations. 
Point cloud-based spatial reasoning works directly with 3D point cloud data through three alignment strategies: (1) Direct alignment that immediately connects point features with language embeddings, (2) Step-by-step alignment that follows sequential stages to bridge modalities, and (3) Task-specific alignment customized for particular spatial reasoning requirements. These methods maintain geometric fidelity but face challenges in handling unstructured 3D data. Hybrid modality-based spatial reasoning combines multiple data streams through either tightly or loosely coupled architectures. Tightly coupled approaches integrate modalities through shared embeddings or end-to-end training, while loosely coupled methods maintain modular components with defined interfaces between them. This enables leveraging complementary strengths across modalities but increases architectural complexity." + }, + { + "type": "text", + "bbox": [ + 0.508, + 0.847, + 0.914, + 0.89 + ], + "angle": 0, + "content": "This taxonomy provides a structured framework for understanding the diverse technical approaches in the field while highlighting the distinct challenges and trade-offs each branch" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.094, + 0.066, + 0.904, + 0.322 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.082, + 0.33, + 0.913, + 0.357 + ], + "angle": 0, + "content": "Figure 2: A Taxonomy of Models for Spatial Reasoning with LLMs: Image-based, Point Cloud-based, and Hybrid Modality-based Approaches and Their Subdivisions." + }, + { + "type": "image", + "bbox": [ + 0.088, + 0.379, + 0.487, + 0.58 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.132, + 0.592, + 0.438, + 0.607 + ], + "angle": 0, + "content": "Figure 3: An overview of image-based approaches." + }, + { + "type": "text", + "bbox": [ + 0.083, + 0.629, + 0.489, + 0.658 + ], + "angle": 0, + "content": "must address. 
Figure 2 presents a detailed breakdown of representative works in each category." + }, + { + "type": "title", + "bbox": [ + 0.084, + 0.674, + 0.46, + 0.707 + ], + "angle": 0, + "content": "3 Recent Advances of Spatial Reasoning in LLM" + }, + { + "type": "title", + "bbox": [ + 0.084, + 0.715, + 0.37, + 0.733 + ], + "angle": 0, + "content": "3.1 Image-based Spatial Reasoning" + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.736, + 0.489, + 0.89 + ], + "angle": 0, + "content": "Image-based spatial reasoning methods can be categorized based on their input modalities: multi-view images, monocular images, RGB-D images, and 3D medical images shown in Figure 3. Each modality offers unique advantages for enhancing 3D understanding in Large Language Models (LLMs). Multi-view images provide spatial data from different perspectives, monocular images extract 3D insights from a single view, RGB-D images incorporate depth information, and 3D medical images address domain-specific challenges in healthcare. These categories highlight the strengths and challenges of each approach in improving spatial reasoning capabilities." + }, + { + "type": "title", + "bbox": [ + 0.509, + 0.381, + 0.755, + 0.396 + ], + "angle": 0, + "content": "3.1.1 Multi-view Images as input" + }, + { + "type": "text", + "bbox": [ + 0.508, + 0.397, + 0.916, + 0.827 + ], + "angle": 0, + "content": "Several studies explore multi-view images to enhance LLMs' spatial understanding. LLaVA-3D Zhu et al. [2024b] leverages multi-view images and 3D positional embeddings to create 3D Patches, achieving state-of-the-art 3D spatial understanding while maintaining 2D image understanding capabilities. Agent3D-Zero Zhang et al. [2024] utilizes multiple images from different viewpoints, enabling VLMs to perform robust reasoning and understand spatial relationships, achieving zero-shot scene understanding. ShapeLLM Qi et al. [2024a] also uses multi-view image input, with robustness to occlusions. Scene-LLM Fu et al. 
[2024] uses multi-view images to build 3D feature representations, incorporating scene-level and egocentric 3D information to support interactive planning. SpatialPIN Ma et al. [2024a] enhances VLM's spatial reasoning by decomposing, understanding and reconstructing explicit 3D representations from multi-view images and generalizes to various 3D tasks. LLMI3D Yang et al. [2024] extracts spatially enhanced local features from high-resolution images using CNNs and a depth predictor and uses ViT to obtain tokens from low-resolution images. It employs a spatially enhanced cross-branch attention mechanism to effectively mine spatial local features of objects and uses geometric projection to handle. Extracting multi-view features results in huge computational overhead and ignores the essential geometry and depth information. Additionally, plain texts often lead to ambiguities especially in cluttered and complex 3D environmentsChen et al. [2024c]. ConceptGraphs Gu et al. [2024] proposes a graph-structured representation for 3D scenes that operates with an open vocabulary, which is developed by utilizing 2D foundation models and integrating their outputs into a 3D format through multiview association." + }, + { + "type": "title", + "bbox": [ + 0.509, + 0.832, + 0.749, + 0.847 + ], + "angle": 0, + "content": "3.1.2 Monocular Image as input" + }, + { + "type": "text", + "bbox": [ + 0.508, + 0.847, + 0.915, + 0.89 + ], + "angle": 0, + "content": "LLMI3D Yang et al. [2024] uses a single 2D image for 3D perception, enhancing performance through spatial local feature mining, 3D query token decoding, and geometry-based" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.083, + 0.069, + 0.488, + 0.14 + ], + "angle": 0, + "content": "3D reasoning. It uses a depth predictor and CNN to extract spatial local features and uses learnable 3D query tokens for geometric coordinate regression. 
It combines black-box networks and white-box projection to address changes in camera focal lengths." + }, + { + "type": "title", + "bbox": [ + 0.084, + 0.146, + 0.3, + 0.161 + ], + "angle": 0, + "content": "3.1.3 RGB-D Image as Input" + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.162, + 0.488, + 0.258 + ], + "angle": 0, + "content": "Depth is estimated in SpatialPIN Ma et al. [2024a] by ZoeDepth when finding field of view (FOV) through perspective fields and provided for 3D-scene understanding and reconstruction. M3D-LaMed Bai et al. [2024] pre-trains the 3D medical vision encoder with medical image slices along depth and introduces end-to-end tuning to integrate 3D information into LLM." + }, + { + "type": "title", + "bbox": [ + 0.084, + 0.266, + 0.326, + 0.281 + ], + "angle": 0, + "content": "3.1.4 3D Medical Image as input" + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.281, + 0.489, + 0.573 + ], + "angle": 0, + "content": "Unlike previous research focused on 2D medical images, integrating multi-modal other information such as textual descriptions, M3D-LaMed Bai et al. [2024] is specifically designed for 3D CT images by analyzing spatial features. It demonstrates excellent performance across multiple tasks, including image-text retrieval, report generation, visual question answering, localization, and segmentation. In order to generate radiology reports automatically, a brand-new framework Liu et al. [2024a] is proposed to employs low-resolution (LR) visual tokens as queries to extract information from high-resolution (HR) tokens, ensuring that detailed information is retained across HR volumes while minimizing computational costs by processing only the HR-informed LR visual queries. 3D-CT-GPT Chen et al. [2024b], based medical visual language model, is tailored for the generation of radiology reports from 3D CT scans, with a focus on chest CTs. OpenMEDLab Wang et al. 
[2024] comprises and publishes a variety of medical foundation models to process multi-modal medical data including Color Fundus Photography (CFP), Optical Coherence Tomography (OCT), endoscopy videos, CT&MR volumes and other pathology images." + }, + { + "type": "title", + "bbox": [ + 0.084, + 0.58, + 0.212, + 0.593 + ], + "angle": 0, + "content": "3.1.5 Discussion" + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.595, + 0.488, + 0.747 + ], + "angle": 0, + "content": "Image-based spatial reasoning methods offer significant advantages, such as easy data acquisition and integration with pre-trained 2D models. Multi-view images provide rich spatial information, while depth estimation enhances scene understanding. However, challenges remain, including limited depth from single views, scale uncertainty, occlusion, and viewpoint dependency. These methods also face issues with visual hallucinations, generalization to novel scenes, and high computational costs. Future research should focus on improving multi-view integration and depth estimation to address these limitations." + }, + { + "type": "title", + "bbox": [ + 0.084, + 0.758, + 0.483, + 0.789 + ], + "angle": 0, + "content": "3.2 Recent Advances of Point Cloud-based Spatial Reasoning" + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.791, + 0.488, + 0.89 + ], + "angle": 0, + "content": "As shown in Figure 4, point cloud-based spatial reasoning has advanced significantly in recent years, employing three main alignment methods: Direct, Step-by-step, and Task-specific Alignment. These methods are essential for integrating point cloud data with language models to enable effective spatial reasoning. 
Direct Alignment establishes immediate connections between point cloud features and language model em" + }, + { + "type": "image", + "bbox": [ + 0.518, + 0.068, + 0.906, + 0.245 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.543, + 0.257, + 0.878, + 0.271 + ], + "angle": 0, + "content": "Figure 4: An overview of point cloud-based approaches." + }, + { + "type": "text", + "bbox": [ + 0.508, + 0.291, + 0.913, + 0.361 + ], + "angle": 0, + "content": "beddings, while Step-by-step Alignment follows a sequential process through multiple stages. Task-specific Alignment is customized for particular spatial reasoning requirements. The choice of method depends on specific application needs and constraints." + }, + { + "type": "title", + "bbox": [ + 0.509, + 0.368, + 0.686, + 0.382 + ], + "angle": 0, + "content": "3.2.1 Direct Alignment" + }, + { + "type": "text", + "bbox": [ + 0.508, + 0.383, + 0.915, + 0.688 + ], + "angle": 0, + "content": "Direct alignment methods create direct connections between point cloud data and language models. PointCLIP [Zhang et al., 2022] was a pioneer, projecting point clouds into multiview depth maps and using CLIP's pre-trained visual encoder for feature extraction, which was then aligned with textual features through a hand-crafted template. This approach showed promising results in zero-shot and few-shot classification tasks by transferring 2D knowledge to the 3D domain. PointCLIP V2 [Zhu et al., 2023] improved the projection quality with a realistic projection module and used GPT-3 for generating 3D-specific text descriptions, achieving better performance in zero-shot classification, part segmentation, and object detection. Chat-Scene [Huang et al., 2024] introduced object identifiers to facilitate object referencing during user-assistant interactions, representing scenes through object-centric embeddings. 
PointLLM [Xu et al., 2025] advanced the field by integrating a point cloud encoder with a powerful LLM, effectively fusing geometric, appearance, and linguistic information, and overcoming data scarcity with automated generation. These methods demonstrate the potential for effective 3D point cloud understanding through language models, enabling improved spatial reasoning and human-AI interaction." + }, + { + "type": "title", + "bbox": [ + 0.509, + 0.694, + 0.729, + 0.708 + ], + "angle": 0, + "content": "3.2.2 Step-by-step Alignment" + }, + { + "type": "text", + "bbox": [ + 0.508, + 0.709, + 0.914, + 0.89 + ], + "angle": 0, + "content": "Step-by-step alignment has gained popularity in integrating point cloud features with language models. Notable approaches include GPT4Point [Qi et al., 2024b], which uses a Bert-based Point-QFormer for point-text feature alignment, followed by object generation. Grounded 3D-LLMs [Chen et al., 2024d] first aligns 3D scene embeddings with textual descriptions via contrastive pre-training, then fine-tunes with referent tokens. LiDAR-LLMs [Yang et al., 2023] employ a three-stage process: cross-modal alignment, object-centric learning, and high-level instruction fine-tuning. MiniGPT-3D [Tang et al., 2024a] follows a four-stage strategy, from point cloud projection to advanced model enhancements using Mixture of Query Experts. GreenPLM [Tang et al., 2024b] uses" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.088, + 0.068, + 0.482, + 0.201 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.103, + 0.212, + 0.466, + 0.226 + ], + "angle": 0, + "content": "Figure 5: An overview of hybrid modality-based approaches." + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.245, + 0.489, + 0.316 + ], + "angle": 0, + "content": "a three-stage method that aligns a text encoder with an LLM using large text data, followed by point-LLM alignment with 3D data. 
These step-by-step approaches highlight the gradual improvement of spatial reasoning in 3D contexts, offering valuable insights for future research." + }, + { + "type": "title", + "bbox": [ + 0.084, + 0.321, + 0.305, + 0.335 + ], + "angle": 0, + "content": "3.2.3 Task-specific Alignment" + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.335, + 0.489, + 0.584 + ], + "angle": 0, + "content": "Task-specific alignment customizes models for specific spatial reasoning tasks to improve performance and generalization. SceneVerse [Jia et al., 2024] introduces a large 3D vision-language dataset and Grounded Pre-training for Scenes (GPS), using multi-level contrastive alignment for unified scene-text alignment, achieving state-of-the-art results in tasks like 3D visual grounding and question answering. LL3DA [Chen et al., 2024c] presents a dialogue system that integrates textual instructions and visual interactions, excelling in complex 3D environments. Chat-3D [Wang et al., 2023] proposes a three-stage training scheme to align 3D scene representations with language models, capturing spatial relations with limited data. VisProg [Yuan et al., 2024] introduces visual programming for zero-shot open-vocabulary 3D grounding, leveraging LLMs to generate and execute programmatic representations. These task-specific approaches highlight the importance of adapting models to complex spatial relationships, enabling robust performance even with limited data or zero-shot tasks." + }, + { + "type": "title", + "bbox": [ + 0.084, + 0.591, + 0.212, + 0.604 + ], + "angle": 0, + "content": "3.2.4 Discussion" + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.605, + 0.489, + 0.705 + ], + "angle": 0, + "content": "The three alignment approaches—Direct, Step-by-step, and Task-specific—each have distinct strengths and challenges. Direct alignment offers efficiency and quick results but struggles with complex spatial relationships. 
Step-by-step alignment improves feature integration at the cost of higher computational resources and training time. Task-specific alignment excels in specialized tasks but may lack broader applicability." + }, + { + "type": "title", + "bbox": [ + 0.084, + 0.711, + 0.45, + 0.727 + ], + "angle": 0, + "content": "3.3 Hybrid Modality-based Spatial Reasoning" + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.729, + 0.487, + 0.84 + ], + "angle": 0, + "content": "Hybrid modality-based spatial reasoning integrates point clouds, images, and LLMs through Tightly Coupled and Loosely Coupled approaches, as shown in Figure 5. The Tightly Coupled approach fosters close integration, enabling seamless interaction and high performance, while the Loosely Coupled approach promotes modularity, allowing independent operation of components for greater scalability and flexibility at the cost of reduced real-time interaction." + }, + { + "type": "title", + "bbox": [ + 0.084, + 0.847, + 0.252, + 0.861 + ], + "angle": 0, + "content": "3.3.1 Tightly Coupled" + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.861, + 0.489, + 0.89 + ], + "angle": 0, + "content": "Several recent works have explored tightly integrated approaches for spatial reasoning across point clouds, images and" + }, + { + "type": "text", + "bbox": [ + 0.508, + 0.069, + 0.916, + 0.431 + ], + "angle": 0, + "content": "language modalities: Point-Bind [Guo et al., 2023] proposes a joint embedding space to align point clouds with images and text through contrastive learning. It leverages ImageBind to construct unified representations that enable tasks like zero-shot classification, open-world understanding and multi-modal generation. The tight coupling allows Point-Bind to reason about point clouds using both visual and linguistic cues. JM3D [Ji et al., 2024] introduces a Structured Multimodal Organizer that tightly fuses multi-view images and hierarchical text trees with point clouds. 
This coupled architecture enables detailed spatial understanding by leveraging complementary information across modalities. The Joint Multi-modal Alignment further enhances the synergistic relationships between visual and linguistic features. Uni3D [Zhou et al., 2023] employs a unified transformer architecture that directly aligns point cloud features with image-text representations. By tightly coupling the modalities through end-to-end training, it achieves strong performance on tasks like zero-shot classification and open-world understanding. The shared backbone enables efficient scaling to billion-parameter models. Uni3D-LLM [Liu et al., 2024b] extends this tight coupling to LLMs through an LLM-to-Generator mapping block. This enables unified perception, generation and editing of point clouds guided by natural language. The tight integration allows leveraging rich semantic knowledge from LLMs while maintaining high-quality 3D understanding." + }, + { + "type": "title", + "bbox": [ + 0.51, + 0.436, + 0.682, + 0.451 + ], + "angle": 0, + "content": "3.3.2 Loosely Coupled" + }, + { + "type": "text", + "bbox": [ + 0.508, + 0.452, + 0.915, + 0.785 + ], + "angle": 0, + "content": "Loosely coupled approaches maintain greater independence between different modalities while still enabling interaction through well-defined interfaces. MultiPLY [Hong et al., 2024] proposes a multisensory embodied LLM that handles multiple input modalities (visual, audio, tactile, thermal) through separate encoders. The modalities are processed independently and communicate through action tokens and state tokens. This decoupled design allows the system to process each modality with specialized encoders optimized for that data type, while enabling scalability and modularity in the system architecture. Similarly, UniPoint-LLM [Liu et al.] introduces a Multimodal Universal Token Space (MUTS) that loosely connects point clouds and images through independent encoders and a shared mapping layer. 
This modular design allows easy integration of new modalities and simplified training by only requiring alignment between new modalities and text, rather than pairwise alignment between all modalities. The main benefits of loosely coupled architectures include greater modularity and flexibility in system design, easier integration of new modalities, and independent scaling of different components. However, this approach may result in less optimal joint representation learning, reduced real-time interaction capabilities, and potential information loss between modalities compared to tightly coupled approaches." + }, + { + "type": "title", + "bbox": [ + 0.51, + 0.791, + 0.638, + 0.804 + ], + "angle": 0, + "content": "3.3.3 Discussion" + }, + { + "type": "text", + "bbox": [ + 0.508, + 0.806, + 0.914, + 0.89 + ], + "angle": 0, + "content": "The choice between tightly and loosely coupled approaches presents important tradeoffs in multimodal spatial reasoning systems. Tightly coupled approaches like Point-Bind and JM3D offer stronger joint representation learning and real-time interaction capabilities through end-to-end training and shared feature spaces. This makes them particularly suitable" + } + ], + [ + { + "type": "table", + "bbox": [ + 0.087, + 0.066, + 0.913, + 0.396 + ], + "angle": 0, + "content": "
ModelData SourceAlignment TypePre-trainingFine-tuningTaskCode
Image - basedLLaVA-3D [Zhu et al., 2024b]Multi-view Images-3D VQA, 3D Scene Understandingcode
Agent3D-Zero [Zhang et al., 2024]Multi-view Images-3D VQA, 3D Semantic Segmentation
ShapeLLM [Qi et al., 2024a]Multi-view Images-3D Object Classification, 3D Scene Captioningcode
Scene-LLM [Fu et al., 2024]Multi-view Images-3D VQA, Dense Captioning
SpatialPIN [Ma et al., 2024a]RGB-D Images-3D Motion Planning, Task Video Generation
LLMI3D [Yang et al., 2024]Monocular Images-3D Grounding, 3D VQA
Spatialvm [Chen et al., 2024a]Monocular Images-Dense Reward Annotator, Spatial Data Generationcode
M3D-LaMed [Bai et al., 2024]Medical Images-3D VQA, 3D VLPcode
HILT [Liu et al., 2024a]Medical Images-3DHRG
3D-CT-GPT [Chen et al., 2024b]Medical Images-Radiology Report Generation, 3D VQA
OpenMEDLab [Wang et al., 2024]Medical Images-Medical Imagingcode
Point Cloud - basedPointLLM [Xu et al., 2025]Point CloudDirect Alignment3D Object Classification, 3D Object Captioningcode
Chat-Scene [Huang et al., 2024]Point CloudDirect Alignment3D Visual Grounding, 3D Scene Captioningcode
PointCLIP [Zhang et al., 2022]Point CloudDirect Alignment3D Point Cloud Classificationcode
PointCLIPv2 [Zhu et al., 2023]Point CloudDirect Alignment3D Point Cloud Classificationcode
GPT4Point [Qi et al., 2024b]Point CloudStep-by-step Alignment3D Object Understandingcode
MiniGPT-3D [Tang et al., 2024a]Point CloudStep-by-step Alignment3D Object Classification, 3D Object Captioningcode
GreenPLM [Tang et al., 2024b]Point CloudStep-by-step Alignment3D Object Classificationcode
Grounded 3D-LLM [Chen et al., 2024d]Point CloudStep-by-step Alignment3D Object Detection, 3D VQAcode
Lidar-LLM [Yang et al., 2023]Point CloudStep-by-step Alignment3D Captioning, 3D Groundingcode
3D-LLaVA [Deng et al., 2025]Point CloudTask-specific Alignment3D VQA, 3D Captioningcode
ScanReason [Zhu et al., 2024a]Point CloudTask-specific Alignment3D Reasoning Groundingcode
SegPoint [He et al., 2024]Point CloudTask-specific Alignment3D Instruction Segmentation
Kestrel [Fei et al., 2024]Point CloudTask-specific AlignmentPart-Aware Point Grounding
SIG3D [Man et al., 2024]Point CloudTask-specific AlignmentSituation Estimationcode
Chat-3D [Wang et al., 2023]Point CloudTask-specific Alignment3D VQAcode
LL3DA [Chen et al., 2024c]Point CloudTask-specific Alignment3D Dense Captioningcode
Hybrid - basedPoint-bind [Guo et al., 2023]Point cloud, ImageTightly Coupled3D Cross-modal Retrieval, Any-to-3D Generationcode
JM3D [Ji et al., 2024]Point cloud, ImageTightly CoupledImage-3D Retrieval, 3D Part Segmentationcode
Uni3D [Zhou et al., 2023]Point cloud, ImageTightly CoupledZero-shot Shape Classificationcode
Uni3D-LLM [Liu et al., 2024b]Point cloud, ImageTightly Coupled3D VQA
MultiPLY [Hong et al., 2024]Point cloud, ImageLoosely CoupledObject retrievalcode
UniPoint-LLM [Liu et al.]Point cloud, ImageLoosely Coupled3D generation, 3D VQA
" + }, + { + "type": "table_caption", + "bbox": [ + 0.082, + 0.405, + 0.914, + 0.459 + ], + "angle": 0, + "content": "Table 1: Taxonomy of Large Language Models with spatial reasoning capability. This table presents a comprehensive comparison of various 3D vision-language models categorized by their input modalities (image-based, point cloud-based, and hybrid-based), showing their data sources, alignment types, training strategies (pre-training and fine-tuning), primary tasks, and code availability. The models are organized into three main categories based on their input type: image-based models, point cloud-based models, and hybrid models that utilize both modalities." + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.481, + 0.487, + 0.718 + ], + "angle": 0, + "content": "for applications requiring detailed spatial understanding and precise control. However, they can be more complex to train and scale, and adding new modalities may require significant architectural changes. In contrast, loosely coupled approaches like MultiPLY and UniPoint-LLM provide greater modularity and flexibility, making them easier to extend and maintain. They allow independent optimization of different components and simplified training procedures, but may sacrifice some performance in tasks requiring fine-grained cross-modal understanding. The optimal choice ultimately depends on specific application requirements - tightly coupled architectures may be preferred for specialized high-performance systems, while loosely coupled designs better suit general-purpose platforms prioritizing extensibility and maintainability. Future work may explore hybrid approaches that combine the benefits of both paradigms, potentially using adaptive coupling mechanisms that adjust based on task demands." 
+ }, + { + "type": "title", + "bbox": [ + 0.084, + 0.735, + 0.225, + 0.753 + ], + "angle": 0, + "content": "4 Applications" + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.758, + 0.488, + 0.857 + ], + "angle": 0, + "content": "A key research focus leverages LLMs to enhance robotic embodied intelligence, enabling machines to interpret natural language commands for real-world tasks. This includes robotic control, navigation, and manipulation, where LLMs parse instructions, generate action plans, and adapt to dynamic environments—for instance, guiding robots to locate objects in cluttered spaces using text-based prompts." + }, + { + "type": "text", + "bbox": [ + 0.084, + 0.861, + 0.49, + 0.892 + ], + "angle": 0, + "content": "3D Scene Understanding. Advanced 3D scene analysis integrates multimodal data (e.g., images, point clouds, text) for" + }, + { + "type": "text", + "bbox": [ + 0.508, + 0.481, + 0.914, + 0.567 + ], + "angle": 0, + "content": "tasks like open-vocabulary segmentation, semantic mapping, and spatial reasoning. Central to this is 3D visual question answering (3D-VQA), requiring models to interpret queries about object attributes, spatial relationships, or contextual roles within scenes. Context-aware systems further account for user perspectives to deliver precise responses." + }, + { + "type": "text", + "bbox": [ + 0.508, + 0.573, + 0.915, + 0.687 + ], + "angle": 0, + "content": "Cross-Domain Applications. In healthcare, LLMs analyze volumetric medical scans (e.g., CT) for lesion detection and automated diagnostics. Autonomous driving systems utilize 3D-capable LLMs to interpret traffic scenes, aiding object detection [Zha et al., 2023, 2024] and path planning. Design-oriented applications include generating indoor layouts from textual requirements, while educational tools employ interactive 3D environments to teach spatial concepts." 
+ }, + { + "type": "title", + "bbox": [ + 0.51, + 0.716, + 0.825, + 0.733 + ], + "angle": 0, + "content": "5 Challenges and Future Directions" + }, + { + "type": "text", + "bbox": [ + 0.508, + 0.75, + 0.915, + 0.891 + ], + "angle": 0, + "content": "Table 1 summarizes the models that leverage LLMs to assist graph-related tasks according to the proposed taxonomy. Based on the above review and analysis, we believe that there is still much space for further enhancement in this field. Recent advances in integrating LLMs with three-dimensional (3D) data have demonstrated considerable promise. However, numerous challenges must still be overcome to realize robust and practical 3D-aware LLMs. Below, we summarize these obstacles and then outline potential pathways to address them, highlighting key research directions for the future." + } + ], + [ + { + "type": "title", + "bbox": [ + 0.086, + 0.069, + 0.212, + 0.084 + ], + "angle": 0, + "content": "5.1 Challenges" + }, + { + "type": "text", + "bbox": [ + 0.087, + 0.096, + 0.486, + 0.22 + ], + "angle": 0, + "content": "Weak Spatial Reasoning and Representation. Multimodal LLMs (MLLMs) exhibit limited acuity in 3D spatial understanding, struggling with fine-grained relationships (e.g., front/back distinctions, occluded object localization) and precise geometric outputs (distances, angles). These issues stem partly from mismatches between unstructured point clouds and sequence-based LLM architectures, where high-dimensional 3D data incur prohibitive token counts or oversimplified encodings." + }, + { + "type": "text", + "bbox": [ + 0.087, + 0.226, + 0.486, + 0.42 + ], + "angle": 0, + "content": "Data and Evaluation Gaps. Progress in 3D-aware LLMs is hindered by the scarcity of high-quality 3D-text paired datasets. Unlike the abundant resources for 2D images and video, the 3D domain lacks standardized, richly annotated datasets crucial for training robust models. 
Existing benchmarks focus mainly on discriminative tasks like classification and retrieval—emphasizing category differentiation rather than generating rich, descriptive 3D scene outputs. Consequently, evaluations often rely on subjective metrics (e.g., human or GPT-based judgments) that can lack consistency. Advancing the field requires developing objective, comprehensive benchmarks that assess both open-vocabulary generation and the spatial plausibility of descriptions relative to the underlying 3D structure." + }, + { + "type": "text", + "bbox": [ + 0.087, + 0.426, + 0.486, + 0.648 + ], + "angle": 0, + "content": "Multimodal Integration and Generalization. Fusing 3D data (e.g., point clouds) with other modalities like 2D imagery, audio, or text poses significant challenges due to their distinct structural characteristics. The conversion and alignment of high-dimensional 3D data with lower-dimensional representations can lead to a loss of intricate details, diluting the original 3D richness. Moreover, current models often struggle with open-vocabulary recognition, limiting their ability to identify or describe objects outside of their training data—especially when encountering unseen scenes or novel objects. This difficulty is further compounded by the variability of natural language, from colloquial expressions to domain-specific terminology, and by noisy inputs. Thus, more sophisticated multimodal integration techniques and generalization strategies are needed to preserve geometric fidelity while accommodating diverse, unpredictable inputs." + }, + { + "type": "text", + "bbox": [ + 0.087, + 0.653, + 0.486, + 0.75 + ], + "angle": 0, + "content": "Complex Task Definition. While 3D-aware LLMs excel in controlled settings, they lack frameworks for nuanced language-context inference in dynamic environments. 
Task decomposition and scalable encoding methods are needed to balance geometric fidelity with computational tractability, particularly for interactive applications requiring real-time spatial reasoning." + }, + { + "type": "title", + "bbox": [ + 0.087, + 0.765, + 0.263, + 0.779 + ], + "angle": 0, + "content": "5.2 Future Directions" + }, + { + "type": "text", + "bbox": [ + 0.087, + 0.792, + 0.486, + 0.889 + ], + "angle": 0, + "content": "Enhancing 3D Perception and Representations. Addressing spatial reasoning gaps requires richer 3D-text datasets (e.g., from robotics, gaming, autonomous driving) and model architectures that encode geometric relationships. Multi-view data and robust depth cues can improve orientation, distance, and occlusion estimation. Compact 3D tokens and refined encoding/decoding methods may bridge unstructured point" + }, + { + "type": "text", + "bbox": [ + 0.514, + 0.07, + 0.912, + 0.097 + ], + "angle": 0, + "content": "clouds with sequence-based models, enabling fine-grained spatial understanding and generation." + }, + { + "type": "text", + "bbox": [ + 0.514, + 0.102, + 0.913, + 0.269 + ], + "angle": 0, + "content": "Multi-Modal Fusion and Instruction Understanding. Tighter integration of modalities (point clouds, images, text, audio) via unified latent spaces or attention mechanisms could preserve subtle geometric and semantic details. Enhanced instruction processing—including hierarchical task decomposition, contextual interpretation, and robustness to dialects/terminology—would improve compositional reasoning in 3D environments and broaden real-world applicability. Furthermore, by leveraging these integrated representations, models can more adeptly adapt to complex instructions and novel scenarios, ultimately paving the way for more robust and versatile 3D reasoning systems." + }, + { + "type": "text", + "bbox": [ + 0.514, + 0.274, + 0.913, + 0.385 + ], + "angle": 0, + "content": "Cross-Scene Generalization and Robust Evaluation. 
Open-vocabulary 3D understanding demands large-scale pretraining on diverse scenes and transfer/lifelong learning paradigms for adapting to novel objects or environments. This understanding extends beyond predefined categories to generalize to unseen objects and scenes. For instance, models need to comprehend \"an old rocking chair\" even if this specific type of chair never appeared in the training data." + }, + { + "type": "text", + "bbox": [ + 0.514, + 0.39, + 0.913, + 0.487 + ], + "angle": 0, + "content": "Expanding Applications for Autonomous Systems. 3D-aware LLMs hold potential in robotics (navigation, manipulation), medical imaging (lesion detection), architectural design, and interactive education. Future systems may integrate environmental constraints, user perspectives, and object affordances for autonomous planning and decision-making in dynamic 3D contexts." + }, + { + "type": "text", + "bbox": [ + 0.514, + 0.488, + 0.913, + 0.599 + ], + "angle": 0, + "content": "Collectively, these challenges and potential directions underscore the field's rapid evolution and its equally significant open questions. Moving forward, more robust 3D-specific data resources, better model architectures, and more refined evaluation protocols will be essential to unlock the full potential of LLMs in three-dimensional settings—and ultimately bring intelligent, multimodal understanding closer to real-world deployment." + }, + { + "type": "title", + "bbox": [ + 0.514, + 0.617, + 0.637, + 0.632 + ], + "angle": 0, + "content": "6 Conclusion" + }, + { + "type": "text", + "bbox": [ + 0.514, + 0.639, + 0.913, + 0.889 + ], + "angle": 0, + "content": "The integration of LLMs with 3D data is a dynamic research area. This survey categorized 3D-LLM research into image-based, point cloud-based, and hybrid modality-based spatial reasoning. It reviewed state-of-the-art methods, their applications in multiple fields, and associated challenges. 
Notably, image-based methods have data-related advantages but face issues like depth information shortage. Point cloud-based methods offer precise 3D details but encounter data-handling difficulties. Hybrid methods combine strengths yet struggle with data alignment. Applications are diverse, but challenges such as weak spatial perception, data scarcity, and evaluation problems exist. Future research should focus on enhancing 3D perception, improving multi-modal fusion, expanding generalization, developing evaluation metrics, enhancing instruction understanding, optimizing 3D representations, and exploring continuous learning. By addressing these, we can unlock the full potential of 3D-aware LLMs for real-world deployment and industry advancement." + } + ], + [ + { + "type": "title", + "bbox": [ + 0.085, + 0.068, + 0.181, + 0.083 + ], + "angle": 0, + "content": "References" + }, + { + "type": "ref_text", + "bbox": [ + 0.086, + 0.087, + 0.487, + 0.142 + ], + "angle": 0, + "content": "Fan Bai, Yuxin Du, Tiejun Huang, Max Q-H Meng, and Bo Zhao. M3d: Advancing 3d medical image analysis with multi-modal large language models. arXiv preprint arXiv:2404.00578, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.086, + 0.147, + 0.488, + 0.23 + ], + "angle": 0, + "content": "Boyuan Chen, Zhuo Xu, Sean Kirmani, Brain Ichter, Dorsa Sadigh, Leonidas Guibas, and Fei Xia. Spatialvlm: Endowing vision-language models with spatial reasoning capabilities. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 14455-14465, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.086, + 0.234, + 0.487, + 0.303 + ], + "angle": 0, + "content": "Hao Chen, Wei Zhao, Yingli Li, Tianyang Zhong, Yisong Wang, Youlan Shang, Lei Guo, Junwei Han, Tianming Liu, Jun Liu, et al. 3d-ct-gpt: Generating 3d radiology reports through integration of large vision-language models. arXiv preprint arXiv:2409.19330, 2024." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.086, + 0.307, + 0.487, + 0.391 + ], + "angle": 0, + "content": "Sijin Chen, Xin Chen, Chi Zhang, Mingsheng Li, Gang Yu, Hao Fei, Hongyuan Zhu, Jiayuan Fan, and Tao Chen. Ll3da: Visual interactive instruction tuning for omni-3d understanding reasoning and planning. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 26428-26438, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.086, + 0.394, + 0.487, + 0.449 + ], + "angle": 0, + "content": "Yilun Chen, Shuai Yang, Haifeng Huang, Tai Wang, Ruiyuan Lyu, Runsen Xu, Dahua Lin, and Jiangmiao Pang. Grounded 3d-llm with referent tokens. arXiv preprint arXiv:2405.10370, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.086, + 0.454, + 0.487, + 0.509 + ], + "angle": 0, + "content": "Jiajun Deng, Tianyu He, Li Jiang, Tianyu Wang, Feras Dayoub, and Ian Reid. 3d-llava: Towards generalist 3d lmm's with omni superpoint transformer. arXiv preprint arXiv:2501.01163, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.086, + 0.513, + 0.487, + 0.569 + ], + "angle": 0, + "content": "Junjie Fei, Mahmoud Ahmed, Jian Ding, Eslam Mohamed Bakr, and Mohamed Elhoseiny. Kestrel: Point grounding multimodal llm for part-aware 3d vision-language understanding. arXiv preprint arXiv:2405.18937, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.086, + 0.573, + 0.487, + 0.627 + ], + "angle": 0, + "content": "Rao Fu, Jingyu Liu, Xilun Chen, Yixin Nie, and Wenhan Xiong. Scene-llm: Extending language model for 3d visual understanding and reasoning. arXiv preprint arXiv:2403.11401, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.086, + 0.632, + 0.487, + 0.701 + ], + "angle": 0, + "content": "Chen Gao, Baining Zhao, Weichen Zhang, Jinzhu Mao, Jun Zhang, Zhiheng Zheng, Fanhang Man, Jianjie Fang, Zile Zhou, Jinqiang Cui, et al. Embodiedcity: A benchmark platform for embodied agent in real-world city environment. arXiv preprint arXiv:2410.09604, 2024." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.086, + 0.705, + 0.487, + 0.801 + ], + "angle": 0, + "content": "Qiao Gu, Ali Kuwajerwala, Sacha Morin, Krishna Murthy Jatavallabhula, Bipasha Sen, Aditya Agarwal, Corban Rivera, William Paul, Kirsty Ellis, Rama Chellappa, et al. Conceptgraphs: Open-vocabulary 3d scene graphs for perception and planning. In 2024 IEEE International Conference on Robotics and Automation (ICRA), pages 5021-5028. IEEE, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.086, + 0.806, + 0.487, + 0.889 + ], + "angle": 0, + "content": "Ziyu Guo, Renrui Zhang, Xiangyang Zhu, Yiwen Tang, Xi-anzheng Ma, Jiaming Han, Kexin Chen, Peng Gao, Xi-anzhi Li, Hongsheng Li, et al. Point-bind & point-llm: Aligning point cloud with multi-modality for 3d understanding, generation, and instruction following. arXiv preprint arXiv:2309.00615, 2023." + }, + { + "type": "list", + "bbox": [ + 0.086, + 0.087, + 0.488, + 0.889 + ], + "angle": 0, + "content": null + }, + { + "type": "ref_text", + "bbox": [ + 0.513, + 0.07, + 0.913, + 0.125 + ], + "angle": 0, + "content": "Shuting He, Henghui Ding, Xudong Jiang, and Bihan Wen. Segpoint: Segment any point cloud via large language model. In European Conference on Computer Vision, pages 349-367. Springer, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.513, + 0.13, + 0.913, + 0.198 + ], + "angle": 0, + "content": "Yining Hong, Haoyu Zhen, Peihao Chen, Shuhong Zheng, Yilun Du, Zhenfang Chen, and Chuang Gan. 3d-llm: Injecting the 3d world into large language models. Advances in Neural Information Processing Systems, 36:20482-20494, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.513, + 0.205, + 0.913, + 0.274 + ], + "angle": 0, + "content": "Yining Hong, Zishuo Zheng, Peihao Chen, Yian Wang, Junyan Li, and Chuang Gan. Multiply: A multisensory object-centric embodied large language model in 3d world. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 26406-26416, 2024." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.513, + 0.279, + 0.913, + 0.362 + ], + "angle": 0, + "content": "Haifeng Huang, Yilun Chen, Zehan Wang, Rongjie Huang, Runsen Xu, Tai Wang, Luping Liu, Xize Cheng, Yang Zhao, Jiangmiao Pang, et al. Chat-scene: Bridging 3d scene and large language models with object identifiers. In The Thirty-eighth Annual Conference on Neural Information Processing Systems, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.513, + 0.367, + 0.913, + 0.424 + ], + "angle": 0, + "content": "Jiayi Ji, Haowei Wang, Changli Wu, Yiwei Ma, Xiaoshuai Sun, and Rongrong Ji. Jm3d & jm3d-llm: Elevating 3d representation with joint multi-modal cues. IEEE Transactions on Pattern Analysis and Machine Intelligence, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.513, + 0.429, + 0.913, + 0.498 + ], + "angle": 0, + "content": "Baoxiong Jia, Yixin Chen, Huangyue Yu, Yan Wang, Xuesong Niu, Tengyu Liu, Qing Li, and Siyuan Huang. Sceneverse: Scaling 3d vision-language learning for grounded scene understanding. In European Conference on Computer Vision, pages 289-310. Springer, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.513, + 0.503, + 0.913, + 0.572 + ], + "angle": 0, + "content": "Jacob Devlin Ming-Wei Chang Kenton and Lee Kristina Toutanova. Bert: Pre-training of deep bidirectional transformers for language understanding. In North American Chapter of the Association for Computational Linguistics: Human Language Technologies, 2019." + }, + { + "type": "ref_text", + "bbox": [ + 0.513, + 0.577, + 0.913, + 0.646 + ], + "angle": 0, + "content": "Junnan Li, Dongxu Li, Caiming Xiong, and Steven Hoi. Blip: Bootstrapping language-image pre-training for unified vision-language understanding and generation. In International conference on machine learning, pages 12888-12900. PMLR, 2022." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.513, + 0.651, + 0.913, + 0.708 + ], + "angle": 0, + "content": "Dingning Liu, Xiaoshui Huang, Zhihui Wang, Zhenfei Yin, Peng Gao, Yujiao Wu, Yuenan Hou, Xinzhu Ma, and Wanli Ouyang. Pointmllm: Aligning multi-modality with llm for point cloud understanding, generation and editing." + }, + { + "type": "ref_text", + "bbox": [ + 0.513, + 0.713, + 0.913, + 0.781 + ], + "angle": 0, + "content": "Che Liu, Zhongwei Wan, Yuqi Wang, Hui Shen, Haozhe Wang, Kangyu Zheng, Mi Zhang, and Rossella Arcucci. Benchmarking and boosting radiology report generation for 3d high-resolution medical images. arXiv preprint arXiv:2406.07146, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.513, + 0.787, + 0.913, + 0.856 + ], + "angle": 0, + "content": "Dingning Liu, Xiaoshui Huang, Yuenan Hou, Zhihui Wang, Zhenfei Yin, Yongshun Gong, Peng Gao, and Wanli Ouyang. Uni3d-llm: Unifying point cloud perception, generation and editing with large language models. arXiv preprint arXiv:2402.03327, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.513, + 0.861, + 0.913, + 0.89 + ], + "angle": 0, + "content": "Chenyang Ma, Kai Lu, Ta-Ying Cheng, Niki Trigoni, and Andrew Markham. Spatialpin: Enhancing spatial reasoning" + }, + { + "type": "list", + "bbox": [ + 0.513, + 0.07, + 0.913, + 0.89 + ], + "angle": 0, + "content": null + } + ], + [ + { + "type": "ref_text", + "bbox": [ + 0.102, + 0.069, + 0.489, + 0.111 + ], + "angle": 0, + "content": "capabilities of vision-language models through prompting and interacting 3d priors. arXiv preprint arXiv:2403.13438, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.086, + 0.117, + 0.49, + 0.201 + ], + "angle": 0, + "content": "Xianzheng Ma, Yash Bhalgat, Brandon Smart, Shuai Chen, Xinghui Li, Jian Ding, Jindong Gu, Dave Zhenyu Chen, Songyou Peng, Jia-Wang Bian, et al. When llms step into the 3d world: A survey and meta-analysis of 3d tasks via multi-modal large language models. 
arXiv preprint arXiv:2405.10255, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.085, + 0.207, + 0.49, + 0.263 + ], + "angle": 0, + "content": "Yuexin Ma, Tai Wang, Xuyang Bai, Huitong Yang, Yuenan Hou, Yaming Wang, Yu Qiao, Ruigang Yang, and Xinge Zhu. Vision-centric bev perception: A survey. IEEE Transactions on Pattern Analysis and Machine Intelligence, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.086, + 0.269, + 0.489, + 0.325 + ], + "angle": 0, + "content": "Yunze Man, Liang-Yan Gui, and Yu-Xiong Wang. Situational awareness matters in 3d vision language reasoning. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 13678-13688, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.086, + 0.331, + 0.49, + 0.401 + ], + "angle": 0, + "content": "Zekun Qi, Runpei Dong, Shaochen Zhang, Haoran Geng, Chunrui Han, Zheng Ge, Li Yi, and Kaiheng Ma. Shapellm: Universal 3d object understanding for embodied interaction. In European Conference on Computer Vision, pages 214-238. Springer, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.086, + 0.407, + 0.49, + 0.49 + ], + "angle": 0, + "content": "Zhangyang Qi, Ye Fang, Zeyi Sun, Xiaoyang Wu, Tong Wu, Jiaqi Wang, Dahua Lin, and Hengshuang Zhao. Gpt4point: A unified framework for point-language understanding and generation. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 26417-26427, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.086, + 0.496, + 0.49, + 0.58 + ], + "angle": 0, + "content": "Alec Radford, Jong Wook Kim, Chris Hallacy, Aditya Ramesh, Gabriel Goh, Sandhini Agarwal, Girish Sastry, Amanda Askell, Pamela Mishkin, Jack Clark, et al. Learning transferable visual models from natural language supervision. In International conference on machine learning, pages 8748-8763. PMLR, 2021." + }, + { + "type": "ref_text", + "bbox": [ + 0.086, + 0.586, + 0.489, + 0.614 + ], + "angle": 0, + "content": "Alec Radford. 
Improving language understanding by generative pre-training. 2018." + }, + { + "type": "ref_text", + "bbox": [ + 0.086, + 0.62, + 0.489, + 0.69 + ], + "angle": 0, + "content": "Yuan Tang, Xu Han, Xianzhi Li, Qiao Yu, Yixue Hao, Long Hu, and Min Chen. Minigpt-3d: Efficiently aligning 3d point clouds with large language models using 2d priors. In Proceedings of the 32nd ACM International Conference on Multimedia, pages 6617-6626, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.086, + 0.696, + 0.489, + 0.753 + ], + "angle": 0, + "content": "Yuan Tang, Xu Han, Xianzhi Li, Qiao Yu, Jinfeng Xu, Yixue Hao, Long Hu, and Min Chen. More text, less point: Towards 3d data-efficient point-language understanding. arXiv preprint arXiv:2408.15966, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.086, + 0.758, + 0.489, + 0.813 + ], + "angle": 0, + "content": "Zehan Wang, Haifeng Huang, Yang Zhao, Ziang Zhang, and Zhou Zhao. Chat-3d: Data-efficiently tuning large language model for universal dialogue of 3d scenes. arXiv preprint arXiv:2308.08769, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.086, + 0.819, + 0.49, + 0.89 + ], + "angle": 0, + "content": "Xiaosong Wang, Xiaofan Zhang, Guotai Wang, Junjun He, Zhongyu Li, Wentao Zhu, Yi Guo, Qi Dou, Xiaoxiao Li, Dequan Wang, et al. Openmedlab: An open-source platform for multi-modality foundation models in medicine. arXiv preprint arXiv:2402.18028, 2024." + }, + { + "type": "list", + "bbox": [ + 0.085, + 0.069, + 0.49, + 0.89 + ], + "angle": 0, + "content": null + }, + { + "type": "ref_text", + "bbox": [ + 0.513, + 0.069, + 0.914, + 0.138 + ], + "angle": 0, + "content": "Runsen Xu, Xiaolong Wang, Tai Wang, Yilun Chen, Jiangmiao Pang, and Dahua Lin. Pointllm: Empowering large language models to understand point clouds. In European Conference on Computer Vision, pages 131-147. Springer, 2025." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.512, + 0.144, + 0.914, + 0.213 + ], + "angle": 0, + "content": "Senqiao Yang, Jiaming Liu, Ray Zhang, Mingjie Pan, Zoey Guo, Xiaqi Li, Zehui Chen, Peng Gao, Yandong Guo, and Shanghang Zhang. Lidar-llm: Exploring the potential of large language models for 3d lidar understanding. arXiv preprint arXiv:2312.14074, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.512, + 0.218, + 0.914, + 0.287 + ], + "angle": 0, + "content": "Fan Yang, Sicheng Zhao, Yanhao Zhang, Haoxiang Chen, Hui Chen, Wenbo Tang, Haonan Lu, Pengfei Xu, Zhenyu Yang, Jungong Han, et al. Llmi3d: Empowering llm with 3d perception from a single 2d image. arXiv preprint arXiv:2408.07422, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.512, + 0.292, + 0.915, + 0.362 + ], + "angle": 0, + "content": "Zhihao Yuan, Jinke Ren, Chun-Mei Feng, Hengshuang Zhao, Shuguang Cui, and Zhen Li. Visual programming for zero-shot open-vocabulary 3d visual grounding. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 20623-20633, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.512, + 0.366, + 0.915, + 0.422 + ], + "angle": 0, + "content": "Jirong Zha, Liang Han, Xiwang Dong, and Zhang Ren. Privacy-preserving push-sum distributed cubature information filter for nonlinear target tracking with switching directed topologies. ISA transactions, 136:16-30, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.512, + 0.427, + 0.914, + 0.481 + ], + "angle": 0, + "content": "Jirong Zha, Nan Zhou, Zhenyu Liu, Tao Sun, and Xinlei Chen. Diffusion-based filter for fast and accurate collaborative tracking with low data transmission. Authorea Preprints, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.512, + 0.487, + 0.914, + 0.557 + ], + "angle": 0, + "content": "Renrui Zhang, Ziyu Guo, Wei Zhang, Kunchang Li, Xupeng Miao, Bin Cui, Yu Qiao, Peng Gao, and Hongsheng Li. Pointclip: Point cloud understanding by clip. 
In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 8552-8562, 2022." + }, + { + "type": "ref_text", + "bbox": [ + 0.512, + 0.562, + 0.914, + 0.63 + ], + "angle": 0, + "content": "Sha Zhang, Di Huang, Jiajun Deng, Shixiang Tang, Wanli Ouyang, Tong He, and Yanyong Zhang. Agent3d-zero: An agent for zero-shot 3d understanding. In European Conference on Computer Vision, pages 186-202. Springer, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.512, + 0.636, + 0.914, + 0.69 + ], + "angle": 0, + "content": "Junsheng Zhou, Jinsheng Wang, Baorui Ma, Yu-Shen Liu, Tiejun Huang, and Xinlong Wang. Uni3d: Exploring unified 3d representation at scale. arXiv preprint arXiv:2310.06773, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.512, + 0.696, + 0.914, + 0.766 + ], + "angle": 0, + "content": "Xiangyang Zhu, Renrui Zhang, Bowei He, Ziyu Guo, Ziyao Zeng, Zipeng Qin, Shanghang Zhang, and Peng Gao. Pointclip v2: Prompting clip and gpt for powerful 3d open-world learning. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 2639-2650, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.512, + 0.77, + 0.914, + 0.826 + ], + "angle": 0, + "content": "Chenming Zhu, Tai Wang, Wenwei Zhang, Kai Chen, and Xihui Liu. Scanreason: Empowering 3d visual grounding with reasoning capabilities. In European Conference on Computer Vision, pages 151-168. Springer, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.512, + 0.831, + 0.914, + 0.885 + ], + "angle": 0, + "content": "Chenming Zhu, Tai Wang, Wenwei Zhang, Jiangmiao Pang, and Xihui Liu. Llava-3d: A simple yet effective pathway to empowering lmm with 3d-awareness. arXiv preprint arXiv:2409.18125, 2024." 
+ }, + { + "type": "list", + "bbox": [ + 0.512, + 0.069, + 0.915, + 0.885 + ], + "angle": 0, + "content": null + } + ] +] \ No newline at end of file diff --git a/data/2025/2504_05xxx/2504.05786/e603c6f4-386e-4380-abf7-2f18915b0ee6_origin.pdf b/data/2025/2504_05xxx/2504.05786/e603c6f4-386e-4380-abf7-2f18915b0ee6_origin.pdf new file mode 100644 index 0000000000000000000000000000000000000000..54d5292910b20fa9dbf5c7047b4ed3be8dde60f7 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05786/e603c6f4-386e-4380-abf7-2f18915b0ee6_origin.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:43044cf426c1d8e135bcc319d8e69ef6ff891e01a61be4ee98744763c99eafef +size 2019424 diff --git a/data/2025/2504_05xxx/2504.05786/full.md b/data/2025/2504_05xxx/2504.05786/full.md new file mode 100644 index 0000000000000000000000000000000000000000..b4c5289f758a3ce733f8082c350b1166c645e147 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05786/full.md @@ -0,0 +1,247 @@ +# How to Enable LLM with 3D Capacity? A Survey of Spatial Reasoning in LLM + +Jirong Zha $^{1*}$ , Yuxuan Fan $^{2*}$ , Xiao Yang $^{2}$ , Chen Gao $^{1\dagger}$ , Xinlei Chen $^{1\dagger}$ + +$^{1}$ Tsinghua University + +2The Hong Kong University of Science and Technology (Guang Zhou) + +zhajirong23@mails.tsinghua.edu.cn, {yfan546, xyang856}@connect.hkust-gz.edu.cn, chgao96@gmail.com, chen.xinlei@sz.tsinghua.edu.cn + +# Abstract + +3D spatial understanding is essential in real-world applications such as robotics, autonomous vehicles, virtual reality, and medical imaging. Recently, Large Language Models (LLMs), having demonstrated remarkable success across various domains, have been leveraged to enhance 3D understanding tasks, showing potential to surpass traditional computer vision methods. In this survey, we present a comprehensive review of methods integrating LLMs with 3D spatial understanding. 
We propose a taxonomy that categorizes existing methods into three branches: image-based methods deriving 3D understanding from 2D visual data, point cloud-based methods working directly with 3D representations, and hybrid modality-based methods combining multiple data streams. We systematically review representative methods along these categories, covering data representations, architectural modifications, and training strategies that bridge textual and 3D modalities. Finally, we discuss current limitations, including dataset scarcity and computational challenges, while highlighting promising research directions in spatial perception, multi-modal fusion, and real-world applications. + +# 1 Introduction + +Large Language Models (LLMs) have evolved from basic neural networks to advanced transformer models like BERT [Kenton and Toutanova, 2019] and GPT [Radford, 2018], originally excelling at language tasks by learning from vast text datasets. Recent advancements, however, have extended these models beyond pure linguistic processing to encompass multimodal ability (In this paper, when we refer to LLMs, we specifically mean those that integrate multimodal functions). Their ability to capture complex patterns and relationships [Chen et al., 2024a] now holds promise for spatial reasoning tasks [Ma et al., 2024b]. By applying these enhanced models to challenges such as understanding 3D object relationships and spatial navigation, we open up new opportunities for advancing fields like robotics, computer vision, and augmented reality [Gao et al., 2024]. + +![](images/1c4b8c1a8e39901fceb895fcf642206dfa4b227423055dcf9d75196664ca28d0.jpg) +Figure 1: Large Language Models can acquire 3D spatial reasoning capabilities through various input sources including multi-view images, RGB-D images, point clouds, and hybrid modalities, enabling the processing and understanding of three-dimensional information. 
+ +At the same time, 3D data and 3D modeling techniques have seen significant developments [Ma et al., 2024c], finding extensive applications in virtual and augmented reality, robotics, autonomous vehicles, gaming, medical imaging, and more. Unlike traditional two-dimensional images, 3D data provides a richer view of objects and environments, capturing essential spatial relationships and geometry. Such information is critical for tasks like scene reconstruction, object manipulation, and autonomous navigation, where merely text-based descriptions or 2D representations may fall short of conveying the necessary depth or spatial context. + +LLMs help Spatial Understanding. Bringing these two fields together—powerful language understanding from LLMs and the spatial realism of 3D data—offers the potential for highly capable, context-aware systems. From a linguistic perspective, real-world descriptions often reference physical arrangement, orientation, or manipulations of objects in space. Text alone can be imprecise or ambiguous about size, shape, or relative positioning unless one can integrate a robust spatial or visual understanding. Consequently, there is growing interest in enhancing LLMs with a "3D capacity" that enables them to interpret, reason, and even generate three-dimensional representations based on natural language prompts. Such an integrated approach opens up exciting prospects: robots that can follow language instructions more effectively by grounding their commands in 3D context, architects who quickly prototype 3D layouts from textual descriptions, game design + +ers who generate immersive environments for narrative-based experiences, and many other creative applications yet to be envisioned. + +Motivation. Although LLMs have been increasingly applied in 3D-related tasks, and Ma et al. 
[2024b] provided a systematic overview of this field, the rapid advancement of this domain has led to numerous new developments in recent months, necessitating an up-to-date survey that captures these recent breakthroughs. Integrating 3D capacity into LLMs faces several key challenges: (1) the scarcity of high-quality 3D datasets compared to abundant text corpora; (2) the fundamental mismatch between sequential text data and continuous 3D spatial structures, requiring specialized architectural adaptations; and (3) the intensive computational requirements for processing 3D data at scale. While early attempts at combining language and 3D have shown promise, current approaches often remain limited in scope, scalability, and generalization capability. Most existing solutions are domain-specific and lack the broad applicability characteristic of text-based LLMs. + +Contribution. The contributions of this work are summarized in the following three aspects: (1) A structured taxonomy. We provide a timely and comprehensive survey that distinguishes itself from the systematic overview offered by Ma et al. [2024b] by presenting a novel perspective on LLM applications in 3D-related tasks: our work constructs a structured taxonomy that categorizes existing research into three primary groups (Figure 2) and offers a forward-looking analysis of the latest breakthroughs, thereby underscoring our unique contributions and the significance of our approach in advancing the field. (2) A comprehensive review. Building on the proposed taxonomy, we systematically review the current research progress on LLMs for spatial reasoning tasks. (3) Future directions. We highlight the remaining limitations of existing works and suggest potential directions for future research. + +# 2 Preliminary + +# 2.1 Large Language Models + +Large Language Models (LLMs) have evolved from early word embeddings to context-aware models like BERT [Kenton and Toutanova, 2019]. 
Generative transformers such as GPT series [Radford, 2018], have further advanced text generation and few-shot learning. However, these models often struggle with spatial reasoning due to their focus on textual patterns, prompting efforts to integrate external spatial knowledge [Fu et al., 2024]. + +Vision-Language Models (VLMs) extend LLMs by aligning visual data with text. Early examples like CLIP [Radford et al., 2021] leverage co-attentional architectures and contrastive learning, while later models such as BLIP [Li et al., 2022] refine these techniques with larger datasets. Yet, most VLMs process only 2D data, limiting their ability to capture detailed 3D spatial configurations. Integrating 3D context via depth maps, point clouds, or voxels remains challenging, motivating ongoing research toward more robust spatial intelligence. + +# 2.2 3D Data Structures + +3D data has different structures, which are essential for understanding the three-dimensional world, and common methods + +include point clouds, voxel grids, polygonal meshes, neural fields, hybrid representations, and 3D Gaussian splatting. Point clouds represent shapes using discrete points, typically denoted as + +$$ +P = \left\{p _ {i} \in \mathbb {R} ^ {3} \mid i = 1, \dots , N \right\}, +$$ + +which are storage-efficient but lack surface topology. Voxel grids partition space into uniform cubes, with each voxel $V(i,j,k)$ storing occupancy or distance values, providing detailed structure at the expense of increased memory usage at higher resolutions. Polygonal meshes compactly encode complex geometries through a set of vertices $\{v_{i}\}$ and faces $\{F_j\}$ , though their unstructured and non-differentiable nature poses challenges for integration with neural networks. 
Neural fields offer an implicit approach by modeling 3D shapes as continuous and differentiable functions, such as + +$$ +f _ {\theta}: \mathbb {R} ^ {3} \rightarrow (c, \sigma), +$$ + +which maps spatial coordinates to color $c$ and density $\sigma$ . Hybrid representations combine these neural fields with traditional volumetric methods (e.g., integrating $f_{\theta}$ with voxel grids) to achieve high-quality, real-time rendering. Meanwhile, 3D Gaussian splatting enhances point clouds by associating each point $p_i$ with a covariance matrix $\Sigma_i$ and color $c_i$ , efficiently encoding radiance information for rendering. Each method has its unique strengths and trade-offs, making them suitable for different applications in 3D understanding and generation. + +# 2.3 Proposed taxonomy + +We propose a taxonomy that classifies 3D-LLM research into three main categories based on input modalities and integration strategies, as shown in Figure 1: Image-based spatial reasoning encompasses approaches that derive 3D understanding from 2D images. This includes multi-view methods that reconstruct 3D scenes, RGB-D images providing explicit depth information, monocular 3D perception inferring depth from single views, and medical imaging applications. While these approaches benefit from readily available image data and existing vision models, they may struggle with occlusions and viewpoint limitations. Point cloud-based spatial reasoning works directly with 3D point cloud data through three alignment strategies: (1) Direct alignment that immediately connects point features with language embeddings, (2) Step-by-step alignment that follows sequential stages to bridge modalities, and (3) Task-specific alignment customized for particular spatial reasoning requirements. These methods maintain geometric fidelity but face challenges in handling unstructured 3D data. 
Hybrid modality-based spatial reasoning combines multiple data streams through either tightly or loosely coupled architectures. Tightly coupled approaches integrate modalities through shared embeddings or end-to-end training, while loosely coupled methods maintain modular components with defined interfaces between them. This enables leveraging complementary strengths across modalities but increases architectural complexity. + +This taxonomy provides a structured framework for understanding the diverse technical approaches in the field while highlighting the distinct challenges and trade-offs each branch + +![](images/10f4e27138d77cef1e66632497ab60fcb460eb82533892d1e5d74ab2bb75012d.jpg) +Figure 2: A Taxonomy of Models for Spatial Reasoning with LLMs: Image-based, Point Cloud-based, and Hybrid Modality-based Approaches and Their Subdivisions. + +![](images/f869e9459876e49023165cdcf85439fb6449fd17975fe58ef81ce28fb4e6e702.jpg) +Figure 3: An overview of image-based approaches. + +must address. Figure 2 presents a detailed breakdown of representative works in each category. + +# 3 Recent Advances of Spatial Reasoning in LLM + +# 3.1 Image-based Spatial Reasoning + +Image-based spatial reasoning methods can be categorized based on their input modalities: multi-view images, monocular images, RGB-D images, and 3D medical images shown in Figure 3. Each modality offers unique advantages for enhancing 3D understanding in Large Language Models (LLMs). Multi-view images provide spatial data from different perspectives, monocular images extract 3D insights from a single view, RGB-D images incorporate depth information, and 3D medical images address domain-specific challenges in healthcare. These categories highlight the strengths and challenges of each approach in improving spatial reasoning capabilities. + +# 3.1.1 Multi-view Images as input + +Several studies explore multi-view images to enhance LLMs' spatial understanding. LLaVA-3D Zhu et al. 
[2024b] leverages multi-view images and 3D positional embeddings to create 3D Patches, achieving state-of-the-art 3D spatial understanding while maintaining 2D image understanding capabilities. Agent3D-Zero Zhang et al. [2024] utilizes multiple images from different viewpoints, enabling VLMs to perform robust reasoning and understand spatial relationships, achieving zero-shot scene understanding. ShapeLLM Qi et al. [2024a] also uses multi-view image input, with robustness to occlusions. Scene-LLM Fu et al. [2024] uses multi-view images to build 3D feature representations, incorporating scene-level and egocentric 3D information to support interactive planning. SpatialPIN Ma et al. [2024a] enhances VLM's spatial reasoning by decomposing, understanding and reconstructing explicit 3D representations from multi-view images and generalizes to various 3D tasks. LLMI3D Yang et al. [2024] extracts spatially enhanced local features from high-resolution images using CNNs and a depth predictor and uses ViT to obtain tokens from low-resolution images. It employs a spatially enhanced cross-branch attention mechanism to effectively mine spatial local features of objects and uses geometric projection to handle changes in camera focal lengths. Extracting multi-view features results in huge computational overhead and ignores the essential geometry and depth information. Additionally, plain texts often lead to ambiguities, especially in cluttered and complex 3D environments Chen et al. [2024c]. ConceptGraphs Gu et al. [2024] proposes a graph-structured representation for 3D scenes that operates with an open vocabulary, which is developed by utilizing 2D foundation models and integrating their outputs into a 3D format through multiview association.
+
+# 3.1.2 Monocular Image as input
+
+LLMI3D Yang et al. [2024] uses a single 2D image for 3D perception, enhancing performance through spatial local feature mining, 3D query token decoding, and geometry-based 3D reasoning. 
It uses a depth predictor and CNN to extract spatial local features and uses learnable 3D query tokens for geometric coordinate regression. It combines black-box networks and white-box projection to address changes in camera focal lengths.
+
+# 3.1.3 RGB-D Image as Input
+
+Depth is estimated in SpatialPIN Ma et al. [2024a] by ZoeDepth when finding field of view (FOV) through perspective fields and provided for 3D-scene understanding and reconstruction. M3D-LaMed Bai et al. [2024] pre-trains the 3D medical vision encoder with medical image slices along depth and introduces end-to-end tuning to integrate 3D information into LLM.
+
+# 3.1.4 3D Medical Image as input
+
+Unlike previous research focused on 2D medical images, integrating other multi-modal information such as textual descriptions, M3D-LaMed Bai et al. [2024] is specifically designed for 3D CT images by analyzing spatial features. It demonstrates excellent performance across multiple tasks, including image-text retrieval, report generation, visual question answering, localization, and segmentation. In order to generate radiology reports automatically, a brand-new framework Liu et al. [2024a] is proposed to employ low-resolution (LR) visual tokens as queries to extract information from high-resolution (HR) tokens, ensuring that detailed information is retained across HR volumes while minimizing computational costs by processing only the HR-informed LR visual queries. 3D-CT-GPT Chen et al. [2024b], a medical visual language model, is tailored for the generation of radiology reports from 3D CT scans, with a focus on chest CTs. OpenMEDLab Wang et al. [2024] comprises and publishes a variety of medical foundation models to process multi-modal medical data including Color Fundus Photography (CFP), Optical Coherence Tomography (OCT), endoscopy videos, CT&MR volumes and other pathology images.
+ +# 3.1.5 Discussion + +Image-based spatial reasoning methods offer significant advantages, such as easy data acquisition and integration with pre-trained 2D models. Multi-view images provide rich spatial information, while depth estimation enhances scene understanding. However, challenges remain, including limited depth from single views, scale uncertainty, occlusion, and viewpoint dependency. These methods also face issues with visual hallucinations, generalization to novel scenes, and high computational costs. Future research should focus on improving multi-view integration and depth estimation to address these limitations. + +# 3.2 Recent Advances of Point Cloud-based Spatial Reasoning + +As shown in Figure 4, point cloud-based spatial reasoning has advanced significantly in recent years, employing three main alignment methods: Direct, Step-by-step, and Task-specific Alignment. These methods are essential for integrating point cloud data with language models to enable effective spatial reasoning. Direct Alignment establishes immediate connections between point cloud features and language model em + +![](images/01a7846f1deba904180c76b95b69058c7981b82fbb85fd782f363da73b4c3476.jpg) +Figure 4: An overview of point cloud-based approaches. + +beddings, while Step-by-step Alignment follows a sequential process through multiple stages. Task-specific Alignment is customized for particular spatial reasoning requirements. The choice of method depends on specific application needs and constraints. + +# 3.2.1 Direct Alignment + +Direct alignment methods create direct connections between point cloud data and language models. PointCLIP [Zhang et al., 2022] was a pioneer, projecting point clouds into multiview depth maps and using CLIP's pre-trained visual encoder for feature extraction, which was then aligned with textual features through a hand-crafted template. 
This approach showed promising results in zero-shot and few-shot classification tasks by transferring 2D knowledge to the 3D domain. PointCLIP V2 [Zhu et al., 2023] improved the projection quality with a realistic projection module and used GPT-3 for generating 3D-specific text descriptions, achieving better performance in zero-shot classification, part segmentation, and object detection. Chat-Scene [Huang et al., 2024] introduced object identifiers to facilitate object referencing during user-assistant interactions, representing scenes through object-centric embeddings. PointLLM [Xu et al., 2025] advanced the field by integrating a point cloud encoder with a powerful LLM, effectively fusing geometric, appearance, and linguistic information, and overcoming data scarcity with automated generation. These methods demonstrate the potential for effective 3D point cloud understanding through language models, enabling improved spatial reasoning and human-AI interaction. + +# 3.2.2 Step-by-step Alignment + +Step-by-step alignment has gained popularity in integrating point cloud features with language models. Notable approaches include GPT4Point [Qi et al., 2024b], which uses a Bert-based Point-QFormer for point-text feature alignment, followed by object generation. Grounded 3D-LLMs [Chen et al., 2024d] first aligns 3D scene embeddings with textual descriptions via contrastive pre-training, then fine-tunes with referent tokens. LiDAR-LLMs [Yang et al., 2023] employ a three-stage process: cross-modal alignment, object-centric learning, and high-level instruction fine-tuning. MiniGPT-3D [Tang et al., 2024a] follows a four-stage strategy, from point cloud projection to advanced model enhancements using Mixture of Query Experts. GreenPLM [Tang et al., 2024b] uses + +![](images/6f283c8ccff38019b629c7d3baf89d8f01eae9a6757bd3db846940b8dbae1d64.jpg) +Figure 5: An overview of hybrid modality-based approaches. 
+ +a three-stage method that aligns a text encoder with an LLM using large text data, followed by point-LLM alignment with 3D data. These step-by-step approaches highlight the gradual improvement of spatial reasoning in 3D contexts, offering valuable insights for future research. + +# 3.2.3 Task-specific Alignment + +Task-specific alignment customizes models for specific spatial reasoning tasks to improve performance and generalization. SceneVerse [Jia et al., 2024] introduces a large 3D vision-language dataset and Grounded Pre-training for Scenes (GPS), using multi-level contrastive alignment for unified scene-text alignment, achieving state-of-the-art results in tasks like 3D visual grounding and question answering. LL3DA [Chen et al., 2024c] presents a dialogue system that integrates textual instructions and visual interactions, excelling in complex 3D environments. Chat-3D [Wang et al., 2023] proposes a three-stage training scheme to align 3D scene representations with language models, capturing spatial relations with limited data. VisProg [Yuan et al., 2024] introduces visual programming for zero-shot open-vocabulary 3D grounding, leveraging LLMs to generate and execute programmatic representations. These task-specific approaches highlight the importance of adapting models to complex spatial relationships, enabling robust performance even with limited data or zero-shot tasks. + +# 3.2.4 Discussion + +The three alignment approaches—Direct, Step-by-step, and Task-specific—each have distinct strengths and challenges. Direct alignment offers efficiency and quick results but struggles with complex spatial relationships. Step-by-step alignment improves feature integration at the cost of higher computational resources and training time. Task-specific alignment excels in specialized tasks but may lack broader applicability. 
+ +# 3.3 Hybrid Modality-based Spatial Reasoning + +Hybrid modality-based spatial reasoning integrates point clouds, images, and LLMs through Tightly Coupled and Loosely Coupled approaches, as shown in Figure 5. The Tightly Coupled approach fosters close integration, enabling seamless interaction and high performance, while the Loosely Coupled approach promotes modularity, allowing independent operation of components for greater scalability and flexibility at the cost of reduced real-time interaction. + +# 3.3.1 Tightly Coupled + +Several recent works have explored tightly integrated approaches for spatial reasoning across point clouds, images and + +language modalities: Point-Bind [Guo et al., 2023] proposes a joint embedding space to align point clouds with images and text through contrastive learning. It leverages ImageBind to construct unified representations that enable tasks like zero-shot classification, open-world understanding and multi-modal generation. The tight coupling allows Point-Bind to reason about point clouds using both visual and linguistic cues. JM3D [Ji et al., 2024] introduces a Structured Multimodal Organizer that tightly fuses multi-view images and hierarchical text trees with point clouds. This coupled architecture enables detailed spatial understanding by leveraging complementary information across modalities. The Joint Multi-modal Alignment further enhances the synergistic relationships between visual and linguistic features. Uni3D [Zhou et al., 2023] employs a unified transformer architecture that directly aligns point cloud features with image-text representations. By tightly coupling the modalities through end-to-end training, it achieves strong performance on tasks like zero-shot classification and open-world understanding. The shared backbone enables efficient scaling to billion-parameter models. Uni3D-LLM [Liu et al., 2024b] extends this tight coupling to LLMs through an LLM-to-Generator mapping block. 
This enables unified perception, generation and editing of point clouds guided by natural language. The tight integration allows leveraging rich semantic knowledge from LLMs while maintaining high-quality 3D understanding. + +# 3.3.2 Loosely Coupled + +Loosely coupled approaches maintain greater independence between different modalities while still enabling interaction through well-defined interfaces. MultiPLY [Hong et al., 2024] proposes a multisensory embodied LLM that handles multiple input modalities (visual, audio, tactile, thermal) through separate encoders. The modalities are processed independently and communicate through action tokens and state tokens. This decoupled design allows the system to process each modality with specialized encoders optimized for that data type, while enabling scalability and modularity in the system architecture. Similarly, UniPoint-LLM [Liu et al.] introduces a Multimodal Universal Token Space (MUTS) that loosely connects point clouds and images through independent encoders and a shared mapping layer. This modular design allows easy integration of new modalities and simplified training by only requiring alignment between new modalities and text, rather than pairwise alignment between all modalities. The main benefits of loosely coupled architectures include greater modularity and flexibility in system design, easier integration of new modalities, and independent scaling of different components. However, this approach may result in less optimal joint representation learning, reduced real-time interaction capabilities, and potential information loss between modalities compared to tightly coupled approaches. + +# 3.3.3 Discussion + +The choice between tightly and loosely coupled approaches presents important tradeoffs in multimodal spatial reasoning systems. 
Tightly coupled approaches like Point-Bind and JM3D offer stronger joint representation learning and real-time interaction capabilities through end-to-end training and shared feature spaces. This makes them particularly suitable + +
ModelData SourceAlignment TypePre-trainingFine-tuningTaskCode
Image - basedLLaVA-3D [Zhu et al., 2024b]Multi-view Images-3D VQA, 3D Scene Understandingcode
Agent3D-Zero [Zhang et al., 2024]Multi-view Images-3D VQA, 3D Semantic Segmentation
ShapeLLM [Qi et al., 2024a]Multi-view Images-3D Object Classification, 3D Scene Captioningcode
Scene-LLM [Fu et al., 2024]Multi-view Images-3D VQA, Dense Captioning
SpatialPIN [Ma et al., 2024a]RGB-D Images-3D Motion Planning, Task Video Generation
LLMI3D [Yang et al., 2024]Monocular Images-3D Grounding, 3D VQA
Spatialvm [Chen et al., 2024a]Monocular Images-Dense Reward Annotator, Spatial Data Generationcode
M3D-LaMed [Bai et al., 2024]Medical Images-3D VQA, 3D VLPcode
HILT [Liu et al., 2024a]Medical Images-3DHRG
3D-CT-GPT [Chen et al., 2024b]Medical Images-Radiology Report Generation, 3D VQA
OpenMEDLab [Wang et al., 2024]Medical Images-Medical Imagingcode
Point Cloud - basedPointLLM [Xu et al., 2025]Point CloudDirect Alignment3D Object Classification, 3D Object Captioningcode
Chat-Scene [Huang et al., 2024]Point CloudDirect Alignment3D Visual Grounding, 3D Scene Captioningcode
PointCLIP [Zhang et al., 2022]Point CloudDirect Alignment3D Point Cloud Classificationcode
PointCLIPv2 [Zhu et al., 2023]Point CloudDirect Alignment3D Point Cloud Classificationcode
GPT4Point [Qi et al., 2024b]Point CloudStep-by-step Alignment3D Object Understandingcode
MiniGPT-3D [Tang et al., 2024a]Point CloudStep-by-step Alignment3D Object Classification, 3D Object Captioningcode
GreenPLM [Tang et al., 2024b]Point CloudStep-by-step Alignment3D Object Classificationcode
Grounded 3D-LLM [Chen et al., 2024d]Point CloudStep-by-step Alignment3D Object Detection, 3D VQAcode
Lidar-LLM [Yang et al., 2023]Point CloudStep-by-step Alignment3D Captioning, 3D Groundingcode
3D-LLaVA [Deng et al., 2025]Point CloudTask-specific Alignment3D VQA, 3D Captioningcode
ScanReason [Zhu et al., 2024a]Point CloudTask-specific Alignment3D Reasoning Groundingcode
SegPoint [He et al., 2024]Point CloudTask-specific Alignment3D Instruction Segmentation
Kestrel [Fei et al., 2024]Point CloudTask-specific AlignmentPart-Aware Point Grounding
SIG3D [Man et al., 2024]Point CloudTask-specific AlignmentSituation Estimationcode
Chat-3D [Wang et al., 2023]Point CloudTask-specific Alignment3D VQAcode
LL3DA [Chen et al., 2024c]Point CloudTask-specific Alignment3D Dense Captioningcode
Hybrid - basedPoint-bind [Guo et al., 2023]Point cloud, ImageTightly Coupled3D Cross-modal Retrieval, Any-to-3D Generationcode
JM3D [Ji et al., 2024]Point cloud, ImageTightly CoupledImage-3D Retrieval, 3D Part Segmentationcode
Uni3D [Zhou et al., 2023]Point cloud, ImageTightly CoupledZero-shot Shape Classificationcode
Uni3D-LLM [Liu et al., 2024b]Point cloud, ImageTightly Coupled3D VQA
MultiPLY [Hong et al., 2024]Point cloud, ImageLoosely CoupledObject retrievalcode
UniPoint-LLM [Liu et al.]Point cloud, ImageLoosely Coupled3D generation, 3D VQA
+ +Table 1: Taxonomy of Large Language Models with spatial reasoning capability. This table presents a comprehensive comparison of various 3D vision-language models categorized by their input modalities (image-based, point cloud-based, and hybrid-based), showing their data sources, alignment types, training strategies (pre-training and fine-tuning), primary tasks, and code availability. The models are organized into three main categories based on their input type: image-based models, point cloud-based models, and hybrid models that utilize both modalities. + +for applications requiring detailed spatial understanding and precise control. However, they can be more complex to train and scale, and adding new modalities may require significant architectural changes. In contrast, loosely coupled approaches like MultiPLY and UniPoint-LLM provide greater modularity and flexibility, making them easier to extend and maintain. They allow independent optimization of different components and simplified training procedures, but may sacrifice some performance in tasks requiring fine-grained cross-modal understanding. The optimal choice ultimately depends on specific application requirements - tightly coupled architectures may be preferred for specialized high-performance systems, while loosely coupled designs better suit general-purpose platforms prioritizing extensibility and maintainability. Future work may explore hybrid approaches that combine the benefits of both paradigms, potentially using adaptive coupling mechanisms that adjust based on task demands. + +# 4 Applications + +A key research focus leverages LLMs to enhance robotic embodied intelligence, enabling machines to interpret natural language commands for real-world tasks. This includes robotic control, navigation, and manipulation, where LLMs parse instructions, generate action plans, and adapt to dynamic environments—for instance, guiding robots to locate objects in cluttered spaces using text-based prompts. 
+ +3D Scene Understanding. Advanced 3D scene analysis integrates multimodal data (e.g., images, point clouds, text) for + +tasks like open-vocabulary segmentation, semantic mapping, and spatial reasoning. Central to this is 3D visual question answering (3D-VQA), requiring models to interpret queries about object attributes, spatial relationships, or contextual roles within scenes. Context-aware systems further account for user perspectives to deliver precise responses. + +Cross-Domain Applications. In healthcare, LLMs analyze volumetric medical scans (e.g., CT) for lesion detection and automated diagnostics. Autonomous driving systems utilize 3D-capable LLMs to interpret traffic scenes, aiding object detection [Zha et al., 2023, 2024] and path planning. Design-oriented applications include generating indoor layouts from textual requirements, while educational tools employ interactive 3D environments to teach spatial concepts. + +# 5 Challenges and Future Directions + +Table 1 summarizes the models that leverage LLMs to assist graph-related tasks according to the proposed taxonomy. Based on the above review and analysis, we believe that there is still much space for further enhancement in this field. Recent advances in integrating LLMs with three-dimensional (3D) data have demonstrated considerable promise. However, numerous challenges must still be overcome to realize robust and practical 3D-aware LLMs. Below, we summarize these obstacles and then outline potential pathways to address them, highlighting key research directions for the future. + +# 5.1 Challenges + +Weak Spatial Reasoning and Representation. Multimodal LLMs (MLLMs) exhibit limited acuity in 3D spatial understanding, struggling with fine-grained relationships (e.g., front/back distinctions, occluded object localization) and precise geometric outputs (distances, angles). 
These issues stem partly from mismatches between unstructured point clouds and sequence-based LLM architectures, where high-dimensional 3D data incur prohibitive token counts or oversimplified encodings. + +Data and Evaluation Gaps. Progress in 3D-aware LLMs is hindered by the scarcity of high-quality 3D-text paired datasets. Unlike the abundant resources for 2D images and video, the 3D domain lacks standardized, richly annotated datasets crucial for training robust models. Existing benchmarks focus mainly on discriminative tasks like classification and retrieval—emphasizing category differentiation rather than generating rich, descriptive 3D scene outputs. Consequently, evaluations often rely on subjective metrics (e.g., human or GPT-based judgments) that can lack consistency. Advancing the field requires developing objective, comprehensive benchmarks that assess both open-vocabulary generation and the spatial plausibility of descriptions relative to the underlying 3D structure. + +Multimodal Integration and Generalization. Fusing 3D data (e.g., point clouds) with other modalities like 2D imagery, audio, or text poses significant challenges due to their distinct structural characteristics. The conversion and alignment of high-dimensional 3D data with lower-dimensional representations can lead to a loss of intricate details, diluting the original 3D richness. Moreover, current models often struggle with open-vocabulary recognition, limiting their ability to identify or describe objects outside of their training data—especially when encountering unseen scenes or novel objects. This difficulty is further compounded by the variability of natural language, from colloquial expressions to domain-specific terminology, and by noisy inputs. Thus, more sophisticated multimodal integration techniques and generalization strategies are needed to preserve geometric fidelity while accommodating diverse, unpredictable inputs. + +Complex Task Definition. 
While 3D-aware LLMs excel in controlled settings, they lack frameworks for nuanced language-context inference in dynamic environments. Task decomposition and scalable encoding methods are needed to balance geometric fidelity with computational tractability, particularly for interactive applications requiring real-time spatial reasoning. + +# 5.2 Future Directions + +Enhancing 3D Perception and Representations. Addressing spatial reasoning gaps requires richer 3D-text datasets (e.g., from robotics, gaming, autonomous driving) and model architectures that encode geometric relationships. Multi-view data and robust depth cues can improve orientation, distance, and occlusion estimation. Compact 3D tokens and refined encoding/decoding methods may bridge unstructured point + +clouds with sequence-based models, enabling fine-grained spatial understanding and generation. + +Multi-Modal Fusion and Instruction Understanding. Tighter integration of modalities (point clouds, images, text, audio) via unified latent spaces or attention mechanisms could preserve subtle geometric and semantic details. Enhanced instruction processing—including hierarchical task decomposition, contextual interpretation, and robustness to dialects/terminology—would improve compositional reasoning in 3D environments and broaden real-world applicability. Furthermore, by leveraging these integrated representations, models can more adeptly adapt to complex instructions and novel scenarios, ultimately paving the way for more robust and versatile 3D reasoning systems. + +Cross-Scene Generalization and Robust Evaluation. Open-vocabulary 3D understanding demands large-scale pretraining on diverse scenes and transfer/lifelong learning paradigms for adapting to novel objects or environments. This understanding extends beyond predefined categories to generalize to unseen objects and scenes. 
For instance, models need to comprehend "an old rocking chair" even if this specific type of chair never appeared in the training data. + +Expanding Applications for Autonomous Systems. 3D-aware LLMs hold potential in robotics (navigation, manipulation), medical imaging (lesion detection), architectural design, and interactive education. Future systems may integrate environmental constraints, user perspectives, and object affordances for autonomous planning and decision-making in dynamic 3D contexts. + +Collectively, these challenges and potential directions underscore the field's rapid evolution and its equally significant open questions. Moving forward, more robust 3D-specific data resources, better model architectures, and more refined evaluation protocols will be essential to unlock the full potential of LLMs in three-dimensional settings—and ultimately bring intelligent, multimodal understanding closer to real-world deployment. + +# 6 Conclusion + +The integration of LLMs with 3D data is a dynamic research area. This survey categorized 3D-LLM research into image-based, point cloud-based, and hybrid modality-based spatial reasoning. It reviewed state-of-the-art methods, their applications in multiple fields, and associated challenges. Notably, image-based methods have data-related advantages but face issues like depth information shortage. Point cloud-based methods offer precise 3D details but encounter data-handling difficulties. Hybrid methods combine strengths yet struggle with data alignment. Applications are diverse, but challenges such as weak spatial perception, data scarcity, and evaluation problems exist. Future research should focus on enhancing 3D perception, improving multi-modal fusion, expanding generalization, developing evaluation metrics, enhancing instruction understanding, optimizing 3D representations, and exploring continuous learning. 
By addressing these, we can unlock the full potential of 3D-aware LLMs for real-world deployment and industry advancement. + +# References + +Fan Bai, Yuxin Du, Tiejun Huang, Max Q-H Meng, and Bo Zhao. M3d: Advancing 3d medical image analysis with multi-modal large language models. arXiv preprint arXiv:2404.00578, 2024. +Boyuan Chen, Zhuo Xu, Sean Kirmani, Brain Ichter, Dorsa Sadigh, Leonidas Guibas, and Fei Xia. Spatialvlm: Endowing vision-language models with spatial reasoning capabilities. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 14455-14465, 2024. +Hao Chen, Wei Zhao, Yingli Li, Tianyang Zhong, Yisong Wang, Youlan Shang, Lei Guo, Junwei Han, Tianming Liu, Jun Liu, et al. 3d-ct-gpt: Generating 3d radiology reports through integration of large vision-language models. arXiv preprint arXiv:2409.19330, 2024. +Sijin Chen, Xin Chen, Chi Zhang, Mingsheng Li, Gang Yu, Hao Fei, Hongyuan Zhu, Jiayuan Fan, and Tao Chen. Ll3da: Visual interactive instruction tuning for omni-3d understanding reasoning and planning. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 26428-26438, 2024. +Yilun Chen, Shuai Yang, Haifeng Huang, Tai Wang, Ruiyuan Lyu, Runsen Xu, Dahua Lin, and Jiangmiao Pang. Grounded 3d-llm with referent tokens. arXiv preprint arXiv:2405.10370, 2024. +Jiajun Deng, Tianyu He, Li Jiang, Tianyu Wang, Feras Dayoub, and Ian Reid. 3d-llava: Towards generalist 3d lmm's with omni superpoint transformer. arXiv preprint arXiv:2501.01163, 2025. +Junjie Fei, Mahmoud Ahmed, Jian Ding, Eslam Mohamed Bakr, and Mohamed Elhoseiny. Kestrel: Point grounding multimodal llm for part-aware 3d vision-language understanding. arXiv preprint arXiv:2405.18937, 2024. +Rao Fu, Jingyu Liu, Xilun Chen, Yixin Nie, and Wenhan Xiong. Scene-llm: Extending language model for 3d visual understanding and reasoning. arXiv preprint arXiv:2403.11401, 2024. 
+Chen Gao, Baining Zhao, Weichen Zhang, Jinzhu Mao, Jun Zhang, Zhiheng Zheng, Fanhang Man, Jianjie Fang, Zile Zhou, Jinqiang Cui, et al. Embodiedcity: A benchmark platform for embodied agent in real-world city environment. arXiv preprint arXiv:2410.09604, 2024. +Qiao Gu, Ali Kuwajerwala, Sacha Morin, Krishna Murthy Jatavallabhula, Bipasha Sen, Aditya Agarwal, Corban Rivera, William Paul, Kirsty Ellis, Rama Chellappa, et al. Conceptgraphs: Open-vocabulary 3d scene graphs for perception and planning. In 2024 IEEE International Conference on Robotics and Automation (ICRA), pages 5021-5028. IEEE, 2024. +Ziyu Guo, Renrui Zhang, Xiangyang Zhu, Yiwen Tang, Xi-anzheng Ma, Jiaming Han, Kexin Chen, Peng Gao, Xi-anzhi Li, Hongsheng Li, et al. Point-bind & point-llm: Aligning point cloud with multi-modality for 3d understanding, generation, and instruction following. arXiv preprint arXiv:2309.00615, 2023. + +Shuting He, Henghui Ding, Xudong Jiang, and Bihan Wen. Segpoint: Segment any point cloud via large language model. In European Conference on Computer Vision, pages 349-367. Springer, 2024. +Yining Hong, Haoyu Zhen, Peihao Chen, Shuhong Zheng, Yilun Du, Zhenfang Chen, and Chuang Gan. 3d-llm: Injecting the 3d world into large language models. Advances in Neural Information Processing Systems, 36:20482-20494, 2023. +Yining Hong, Zishuo Zheng, Peihao Chen, Yian Wang, Junyan Li, and Chuang Gan. Multiply: A multisensory object-centric embodied large language model in 3d world. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 26406-26416, 2024. +Haifeng Huang, Yilun Chen, Zehan Wang, Rongjie Huang, Runsen Xu, Tai Wang, Luping Liu, Xize Cheng, Yang Zhao, Jiangmiao Pang, et al. Chat-scene: Bridging 3d scene and large language models with object identifiers. In The Thirty-eighth Annual Conference on Neural Information Processing Systems, 2024. +Jiayi Ji, Haowei Wang, Changli Wu, Yiwei Ma, Xiaoshuai Sun, and Rongrong Ji. 
Jm3d & jm3d-llm: Elevating 3d representation with joint multi-modal cues. IEEE Transactions on Pattern Analysis and Machine Intelligence, 2024. +Baoxiong Jia, Yixin Chen, Huangyue Yu, Yan Wang, Xuesong Niu, Tengyu Liu, Qing Li, and Siyuan Huang. Sceneverse: Scaling 3d vision-language learning for grounded scene understanding. In European Conference on Computer Vision, pages 289-310. Springer, 2024. +Jacob Devlin Ming-Wei Chang Kenton and Lee Kristina Toutanova. Bert: Pre-training of deep bidirectional transformers for language understanding. In North American Chapter of the Association for Computational Linguistics: Human Language Technologies, 2019. +Junnan Li, Dongxu Li, Caiming Xiong, and Steven Hoi. Blip: Bootstrapping language-image pre-training for unified vision-language understanding and generation. In International conference on machine learning, pages 12888-12900. PMLR, 2022. +Dingning Liu, Xiaoshui Huang, Zhihui Wang, Zhenfei Yin, Peng Gao, Yujiao Wu, Yuenan Hou, Xinzhu Ma, and Wanli Ouyang. Pointmllm: Aligning multi-modality with llm for point cloud understanding, generation and editing. +Che Liu, Zhongwei Wan, Yuqi Wang, Hui Shen, Haozhe Wang, Kangyu Zheng, Mi Zhang, and Rossella Arcucci. Benchmarking and boosting radiology report generation for 3d high-resolution medical images. arXiv preprint arXiv:2406.07146, 2024. +Dingning Liu, Xiaoshui Huang, Yuenan Hou, Zhihui Wang, Zhenfei Yin, Yongshun Gong, Peng Gao, and Wanli Ouyang. Uni3d-llm: Unifying point cloud perception, generation and editing with large language models. arXiv preprint arXiv:2402.03327, 2024. +Chenyang Ma, Kai Lu, Ta-Ying Cheng, Niki Trigoni, and Andrew Markham. Spatialpin: Enhancing spatial reasoning + +capabilities of vision-language models through prompting and interacting 3d priors. arXiv preprint arXiv:2403.13438, 2024. +Xianzheng Ma, Yash Bhalgat, Brandon Smart, Shuai Chen, Xinghui Li, Jian Ding, Jindong Gu, Dave Zhenyu Chen, Songyou Peng, Jia-Wang Bian, et al. 
When llms step into the 3d world: A survey and meta-analysis of 3d tasks via multi-modal large language models. arXiv preprint arXiv:2405.10255, 2024. +Yuexin Ma, Tai Wang, Xuyang Bai, Huitong Yang, Yuenan Hou, Yaming Wang, Yu Qiao, Ruigang Yang, and Xinge Zhu. Vision-centric bev perception: A survey. IEEE Transactions on Pattern Analysis and Machine Intelligence, 2024. +Yunze Man, Liang-Yan Gui, and Yu-Xiong Wang. Situational awareness matters in 3d vision language reasoning. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 13678-13688, 2024. +Zekun Qi, Runpei Dong, Shaochen Zhang, Haoran Geng, Chunrui Han, Zheng Ge, Li Yi, and Kaiheng Ma. Shapellm: Universal 3d object understanding for embodied interaction. In European Conference on Computer Vision, pages 214-238. Springer, 2024. +Zhangyang Qi, Ye Fang, Zeyi Sun, Xiaoyang Wu, Tong Wu, Jiaqi Wang, Dahua Lin, and Hengshuang Zhao. Gpt4point: A unified framework for point-language understanding and generation. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 26417-26427, 2024. +Alec Radford, Jong Wook Kim, Chris Hallacy, Aditya Ramesh, Gabriel Goh, Sandhini Agarwal, Girish Sastry, Amanda Askell, Pamela Mishkin, Jack Clark, et al. Learning transferable visual models from natural language supervision. In International conference on machine learning, pages 8748-8763. PMLR, 2021. +Alec Radford. Improving language understanding by generative pre-training. 2018. +Yuan Tang, Xu Han, Xianzhi Li, Qiao Yu, Yixue Hao, Long Hu, and Min Chen. Minigpt-3d: Efficiently aligning 3d point clouds with large language models using 2d priors. In Proceedings of the 32nd ACM International Conference on Multimedia, pages 6617-6626, 2024. +Yuan Tang, Xu Han, Xianzhi Li, Qiao Yu, Jinfeng Xu, Yixue Hao, Long Hu, and Min Chen. More text, less point: Towards 3d data-efficient point-language understanding. arXiv preprint arXiv:2408.15966, 2024. 
+Zehan Wang, Haifeng Huang, Yang Zhao, Ziang Zhang, and Zhou Zhao. Chat-3d: Data-efficiently tuning large language model for universal dialogue of 3d scenes. arXiv preprint arXiv:2308.08769, 2023. +Xiaosong Wang, Xiaofan Zhang, Guotai Wang, Junjun He, Zhongyu Li, Wentao Zhu, Yi Guo, Qi Dou, Xiaoxiao Li, Dequan Wang, et al. Openmedlab: An open-source platform for multi-modality foundation models in medicine. arXiv preprint arXiv:2402.18028, 2024. + +Runsen Xu, Xiaolong Wang, Tai Wang, Yilun Chen, Jiangmiao Pang, and Dahua Lin. Pointllm: Empowering large language models to understand point clouds. In European Conference on Computer Vision, pages 131-147. Springer, 2025. +Senqiao Yang, Jiaming Liu, Ray Zhang, Mingjie Pan, Zoey Guo, Xiaqi Li, Zehui Chen, Peng Gao, Yandong Guo, and Shanghang Zhang. Lidar-llm: Exploring the potential of large language models for 3d lidar understanding. arXiv preprint arXiv:2312.14074, 2023. +Fan Yang, Sicheng Zhao, Yanhao Zhang, Haoxiang Chen, Hui Chen, Wenbo Tang, Haonan Lu, Pengfei Xu, Zhenyu Yang, Jungong Han, et al. Llmi3d: Empowering llm with 3d perception from a single 2d image. arXiv preprint arXiv:2408.07422, 2024. +Zhihao Yuan, Jinke Ren, Chun-Mei Feng, Hengshuang Zhao, Shuguang Cui, and Zhen Li. Visual programming for zero-shot open-vocabulary 3d visual grounding. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 20623-20633, 2024. +Jirong Zha, Liang Han, Xiwang Dong, and Zhang Ren. Privacy-preserving push-sum distributed cubature information filter for nonlinear target tracking with switching directed topologies. ISA transactions, 136:16-30, 2023. +Jirong Zha, Nan Zhou, Zhenyu Liu, Tao Sun, and Xinlei Chen. Diffusion-based filter for fast and accurate collaborative tracking with low data transmission. Authorea Preprints, 2024. +Renrui Zhang, Ziyu Guo, Wei Zhang, Kunchang Li, Xupeng Miao, Bin Cui, Yu Qiao, Peng Gao, and Hongsheng Li. Pointclip: Point cloud understanding by clip. 
In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 8552-8562, 2022. +Sha Zhang, Di Huang, Jiajun Deng, Shixiang Tang, Wanli Ouyang, Tong He, and Yanyong Zhang. Agent3d-zero: An agent for zero-shot 3d understanding. In European Conference on Computer Vision, pages 186-202. Springer, 2024. +Junsheng Zhou, Jinsheng Wang, Baorui Ma, Yu-Shen Liu, Tiejun Huang, and Xinlong Wang. Uni3d: Exploring unified 3d representation at scale. arXiv preprint arXiv:2310.06773, 2023. +Xiangyang Zhu, Renrui Zhang, Bowei He, Ziyu Guo, Ziyao Zeng, Zipeng Qin, Shanghang Zhang, and Peng Gao. Pointclip v2: Prompting clip and gpt for powerful 3d open-world learning. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 2639-2650, 2023. +Chenming Zhu, Tai Wang, Wenwei Zhang, Kai Chen, and Xihui Liu. Scanreason: Empowering 3d visual grounding with reasoning capabilities. In European Conference on Computer Vision, pages 151-168. Springer, 2024. +Chenming Zhu, Tai Wang, Wenwei Zhang, Jiangmiao Pang, and Xihui Liu. Llava-3d: A simple yet effective pathway to empowering lmm with 3d-awareness. arXiv preprint arXiv:2409.18125, 2024. 
\ No newline at end of file diff --git a/data/2025/2504_05xxx/2504.05786/images/01a7846f1deba904180c76b95b69058c7981b82fbb85fd782f363da73b4c3476.jpg b/data/2025/2504_05xxx/2504.05786/images/01a7846f1deba904180c76b95b69058c7981b82fbb85fd782f363da73b4c3476.jpg new file mode 100644 index 0000000000000000000000000000000000000000..f16f85d5b1576d7bd9d6db647476bf6b9591445e --- /dev/null +++ b/data/2025/2504_05xxx/2504.05786/images/01a7846f1deba904180c76b95b69058c7981b82fbb85fd782f363da73b4c3476.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e93860ae5b5285bcde1b129f2fc4a7eeccd2eafe14e53d2218a60ae2bdc9c363 +size 54344 diff --git a/data/2025/2504_05xxx/2504.05786/images/10f4e27138d77cef1e66632497ab60fcb460eb82533892d1e5d74ab2bb75012d.jpg b/data/2025/2504_05xxx/2504.05786/images/10f4e27138d77cef1e66632497ab60fcb460eb82533892d1e5d74ab2bb75012d.jpg new file mode 100644 index 0000000000000000000000000000000000000000..173a755e23471ace1527da3010619ee7d32154a4 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05786/images/10f4e27138d77cef1e66632497ab60fcb460eb82533892d1e5d74ab2bb75012d.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:fafdce046b07db2130a4d77ad192c867c1c8bc5c560533412ad785cbcc0b0bda +size 155601 diff --git a/data/2025/2504_05xxx/2504.05786/images/1bf7f57265e65d413e42b9341fc93ceead1023f71cc6b80ce679fd74fcaec139.jpg b/data/2025/2504_05xxx/2504.05786/images/1bf7f57265e65d413e42b9341fc93ceead1023f71cc6b80ce679fd74fcaec139.jpg new file mode 100644 index 0000000000000000000000000000000000000000..948238f1b16a1b33e571822e17abf1e644d5e521 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05786/images/1bf7f57265e65d413e42b9341fc93ceead1023f71cc6b80ce679fd74fcaec139.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7faa58b8e9f594ff92b3615e5d9b257b1a8bd60c0c5d7b29f2e1c8685be56d90 +size 3566 diff --git 
a/data/2025/2504_05xxx/2504.05786/images/1c4b8c1a8e39901fceb895fcf642206dfa4b227423055dcf9d75196664ca28d0.jpg b/data/2025/2504_05xxx/2504.05786/images/1c4b8c1a8e39901fceb895fcf642206dfa4b227423055dcf9d75196664ca28d0.jpg new file mode 100644 index 0000000000000000000000000000000000000000..d5930805042d877283df8f805cd92940aba9c696 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05786/images/1c4b8c1a8e39901fceb895fcf642206dfa4b227423055dcf9d75196664ca28d0.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ae23458659e5dcebfb13e5dae6e5018570a29ee651e295cfd240d3e105481a81 +size 36277 diff --git a/data/2025/2504_05xxx/2504.05786/images/6f283c8ccff38019b629c7d3baf89d8f01eae9a6757bd3db846940b8dbae1d64.jpg b/data/2025/2504_05xxx/2504.05786/images/6f283c8ccff38019b629c7d3baf89d8f01eae9a6757bd3db846940b8dbae1d64.jpg new file mode 100644 index 0000000000000000000000000000000000000000..8dd04b4db980b309f553fd56331fd4cbfa8e9ea4 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05786/images/6f283c8ccff38019b629c7d3baf89d8f01eae9a6757bd3db846940b8dbae1d64.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a00e372f4b4b6d00c90e78867e1efcad5798514593ae0a191b7927f8014d815f +size 43311 diff --git a/data/2025/2504_05xxx/2504.05786/images/d940e65bdb4924227a21274aee6d75d1ec4fbdde1933224aff9d92e6f49a75fb.jpg b/data/2025/2504_05xxx/2504.05786/images/d940e65bdb4924227a21274aee6d75d1ec4fbdde1933224aff9d92e6f49a75fb.jpg new file mode 100644 index 0000000000000000000000000000000000000000..e8d31fb47208d75700ab1cdb8fd1cbf18930d666 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05786/images/d940e65bdb4924227a21274aee6d75d1ec4fbdde1933224aff9d92e6f49a75fb.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:bd38a107ee38656537461d7db0d98b1c3d4dabe27d5eb4cca474ca09472f5d25 +size 234695 diff --git a/data/2025/2504_05xxx/2504.05786/images/e0e4442a4b46313dc44cf0f5c9bff8b0f952923527a0c8287f8b90fc3e5eaf75.jpg 
b/data/2025/2504_05xxx/2504.05786/images/e0e4442a4b46313dc44cf0f5c9bff8b0f952923527a0c8287f8b90fc3e5eaf75.jpg new file mode 100644 index 0000000000000000000000000000000000000000..3bcbcca55c73f806c60f26ad58a2d78ee2b5bafd --- /dev/null +++ b/data/2025/2504_05xxx/2504.05786/images/e0e4442a4b46313dc44cf0f5c9bff8b0f952923527a0c8287f8b90fc3e5eaf75.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:afae7feeaf22865158d13633ae99dc1da58536637487f848012405bba649167b +size 2433 diff --git a/data/2025/2504_05xxx/2504.05786/images/f869e9459876e49023165cdcf85439fb6449fd17975fe58ef81ce28fb4e6e702.jpg b/data/2025/2504_05xxx/2504.05786/images/f869e9459876e49023165cdcf85439fb6449fd17975fe58ef81ce28fb4e6e702.jpg new file mode 100644 index 0000000000000000000000000000000000000000..24ff9bc3505f9c3dc5da11a2eb1f639c651a6ccc --- /dev/null +++ b/data/2025/2504_05xxx/2504.05786/images/f869e9459876e49023165cdcf85439fb6449fd17975fe58ef81ce28fb4e6e702.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:45aafc26c117d72769965f336d7dde1eb862d3fe7c5ce511c80adc9e733cefa1 +size 58483 diff --git a/data/2025/2504_05xxx/2504.05786/layout.json b/data/2025/2504_05xxx/2504.05786/layout.json new file mode 100644 index 0000000000000000000000000000000000000000..532723da9eb651e1bb7944d9de616cce26323462 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05786/layout.json @@ -0,0 +1,5359 @@ +{ + "pdf_info": [ + { + "para_blocks": [ + { + "bbox": [ + 60, + 86, + 550, + 104 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 60, + 86, + 550, + 104 + ], + "spans": [ + { + "bbox": [ + 60, + 86, + 550, + 104 + ], + "type": "text", + "content": "How to Enable LLM with 3D Capacity? 
A Survey of Spatial Reasoning in LLM" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 118, + 117, + 489, + 133 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 118, + 117, + 489, + 133 + ], + "spans": [ + { + "bbox": [ + 118, + 117, + 489, + 133 + ], + "type": "text", + "content": "Jirong Zha" + }, + { + "bbox": [ + 118, + 117, + 489, + 133 + ], + "type": "inline_equation", + "content": "^{1*}" + }, + { + "bbox": [ + 118, + 117, + 489, + 133 + ], + "type": "text", + "content": ", Yuxuan Fan" + }, + { + "bbox": [ + 118, + 117, + 489, + 133 + ], + "type": "inline_equation", + "content": "^{2*}" + }, + { + "bbox": [ + 118, + 117, + 489, + 133 + ], + "type": "text", + "content": ", Xiao Yang" + }, + { + "bbox": [ + 118, + 117, + 489, + 133 + ], + "type": "inline_equation", + "content": "^{2}" + }, + { + "bbox": [ + 118, + 117, + 489, + 133 + ], + "type": "text", + "content": ", Chen Gao" + }, + { + "bbox": [ + 118, + 117, + 489, + 133 + ], + "type": "inline_equation", + "content": "^{1\\dagger}" + }, + { + "bbox": [ + 118, + 117, + 489, + 133 + ], + "type": "text", + "content": ", Xinlei Chen" + }, + { + "bbox": [ + 118, + 117, + 489, + 133 + ], + "type": "inline_equation", + "content": "^{1\\dagger}" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 253, + 135, + 357, + 149 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 253, + 135, + 357, + 149 + ], + "spans": [ + { + "bbox": [ + 253, + 135, + 357, + 149 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 253, + 135, + 357, + 149 + ], + "type": "text", + "content": "Tsinghua University" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 135, + 150, + 476, + 164 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 135, + 150, + 476, + 164 + ], + "spans": [ + { + "bbox": [ + 135, + 150, + 476, + 164 + ], + "type": "text", + "content": "2The Hong Kong University of Science and Technology (Guang Zhou)" + } + ] + } + ], + "index": 4 + 
}, + { + "bbox": [ + 99, + 166, + 512, + 193 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 99, + 166, + 512, + 193 + ], + "spans": [ + { + "bbox": [ + 99, + 166, + 512, + 193 + ], + "type": "text", + "content": "zhajirong23@mails.tsinghua.edu.cn, {yfan546, xyang856}@connect.hkust-gz.edu.cn, chgao96@gmail.com, chen.xinlei@sz.tsinghua.edu.cn" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 150, + 215, + 198, + 228 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 150, + 215, + 198, + 228 + ], + "spans": [ + { + "bbox": [ + 150, + 215, + 198, + 228 + ], + "type": "text", + "content": "Abstract" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 70, + 236, + 279, + 501 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 236, + 279, + 501 + ], + "spans": [ + { + "bbox": [ + 70, + 236, + 279, + 501 + ], + "type": "text", + "content": "3D spatial understanding is essential in real-world applications such as robotics, autonomous vehicles, virtual reality, and medical imaging. Recently, Large Language Models (LLMs), having demonstrated remarkable success across various domains, have been leveraged to enhance 3D understanding tasks, showing potential to surpass traditional computer vision methods. In this survey, we present a comprehensive review of methods integrating LLMs with 3D spatial understanding. We propose a taxonomy that categorizes existing methods into three branches: image-based methods deriving 3D understanding from 2D visual data, point cloud-based methods working directly with 3D representations, and hybrid modality-based methods combining multiple data streams. We systematically review representative methods along these categories, covering data representations, architectural modifications, and training strategies that bridge textual and 3D modalities. 
Finally, we discuss current limitations, including dataset scarcity and computational challenges, while highlighting promising research directions in spatial perception, multi-modal fusion, and real-world applications." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 51, + 521, + 137, + 533 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 521, + 137, + 533 + ], + "spans": [ + { + "bbox": [ + 51, + 521, + 137, + 533 + ], + "type": "text", + "content": "1 Introduction" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 50, + 539, + 299, + 704 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 539, + 299, + 704 + ], + "spans": [ + { + "bbox": [ + 50, + 539, + 299, + 704 + ], + "type": "text", + "content": "Large Language Models (LLMs) have evolved from basic neural networks to advanced transformer models like BERT [Kenton and Toutanova, 2019] and GPT [Radford, 2018], originally excelling at language tasks by learning from vast text datasets. Recent advancements, however, have extended these models beyond pure linguistic processing to encompass multimodal ability (In this paper, when we refer to LLMs, we specifically mean those that integrate multimodal functions). Their ability to capture complex patterns and relationships [Chen et al., 2024a] now holds promise for spatial reasoning tasks [Ma et al., 2024b]. By applying these enhanced models to challenges such as understanding 3D object relationships and spatial navigation, we open up new opportunities for advancing fields like robotics, computer vision, and augmented reality [Gao et al., 2024]." 
+ } + ] + } + ], + "index": 9 + }, + { + "type": "image", + "bbox": [ + 315, + 215, + 556, + 326 + ], + "blocks": [ + { + "bbox": [ + 315, + 215, + 556, + 326 + ], + "lines": [ + { + "bbox": [ + 315, + 215, + 556, + 326 + ], + "spans": [ + { + "bbox": [ + 315, + 215, + 556, + 326 + ], + "type": "image", + "image_path": "1c4b8c1a8e39901fceb895fcf642206dfa4b227423055dcf9d75196664ca28d0.jpg" + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 311, + 335, + 559, + 376 + ], + "lines": [ + { + "bbox": [ + 311, + 335, + 559, + 376 + ], + "spans": [ + { + "bbox": [ + 311, + 335, + 559, + 376 + ], + "type": "text", + "content": "Figure 1: Large Language Models can acquire 3D spatial reasoning capabilities through various input sources including multi-view images, RGB-D images, point clouds, and hybrid modalities, enabling the processing and understanding of three-dimensional information." + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "image_caption" + } + ], + "index": 10 + }, + { + "bbox": [ + 310, + 400, + 559, + 521 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 400, + 559, + 521 + ], + "spans": [ + { + "bbox": [ + 310, + 400, + 559, + 521 + ], + "type": "text", + "content": "At the same time, 3D data and 3D modeling techniques have seen significant developments [Ma et al., 2024c], finding extensive applications in virtual and augmented reality, robotics, autonomous vehicles, gaming, medical imaging, and more. Unlike traditional two-dimensional images, 3D data provides a richer view of objects and environments, capturing essential spatial relationships and geometry. Such information is critical for tasks like scene reconstruction, object manipulation, and autonomous navigation, where merely text-based descriptions or 2D representations may fall short of conveying the necessary depth or spatial context." 
+ } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 310, + 528, + 560, + 706 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 528, + 560, + 706 + ], + "spans": [ + { + "bbox": [ + 310, + 528, + 560, + 706 + ], + "type": "text", + "content": "LLMs help Spatial Understanding. Bringing these two fields together—powerful language understanding from LLMs and the spatial realism of 3D data—offers the potential for highly capable, context-aware systems. From a linguistic perspective, real-world descriptions often reference physical arrangement, orientation, or manipulations of objects in space. Text alone can be imprecise or ambiguous about size, shape, or relative positioning unless one can integrate a robust spatial or visual understanding. Consequently, there is growing interest in enhancing LLMs with a \"3D capacity\" that enables them to interpret, reason, and even generate three-dimensional representations based on natural language prompts. Such an integrated approach opens up exciting prospects: robots that can follow language instructions more effectively by grounding their commands in 3D context, architects who quickly prototype 3D layouts from textual descriptions, game design" + } + ] + } + ], + "index": 13 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 14, + 213, + 37, + 555 + ], + "type": "aside_text", + "angle": 270, + "lines": [ + { + "bbox": [ + 14, + 213, + 37, + 555 + ], + "spans": [ + { + "bbox": [ + 14, + 213, + 37, + 555 + ], + "type": "text", + "content": "arXiv:2504.05786v1 [cs.CV] 8 Apr 2025" + } + ] + } + ], + "index": 0 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 0 + }, + { + "para_blocks": [ + { + "bbox": [ + 50, + 54, + 298, + 87 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 54, + 298, + 87 + ], + "spans": [ + { + "bbox": [ + 50, + 54, + 298, + 87 + ], + "type": "text", + "content": "ers who generate immersive environments for narrative-based experiences, and many other 
creative applications yet to be envisioned." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 50, + 90, + 298, + 266 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 90, + 298, + 266 + ], + "spans": [ + { + "bbox": [ + 50, + 90, + 298, + 266 + ], + "type": "text", + "content": "Motivation. Although LLMs have been increasingly applied in 3D-related tasks, and Ma et al. [2024b] provided a systematic overview of this field, the rapid advancement of this domain has led to numerous new developments in recent months, necessitating an up-to-date survey that captures these recent breakthroughs. Integrating 3D capacity into LLMs faces several key challenges: (1) the scarcity of high-quality 3D datasets compared to abundant text corpora; (2) the fundamental mismatch between sequential text data and continuous 3D spatial structures, requiring specialized architectural adaptations; and (3) the intensive computational requirements for processing 3D data at scale. While early attempts at combining language and 3D have shown promise, current approaches often remain limited in scope, scalability, and generalization capability. Most existing solutions are domain-specific and lack the broad applicability characteristic of text-based LLMs." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 50, + 269, + 298, + 434 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 269, + 298, + 434 + ], + "spans": [ + { + "bbox": [ + 50, + 269, + 298, + 434 + ], + "type": "text", + "content": "Contribution. The contributions of this work are summarized in the following three aspects: (1) A structured taxonomy. We provide a timely and comprehensive survey that distinguishes itself from the systematic overview offered by Ma et al. 
[2024b] by presenting a novel perspective on LLM applications in 3D-related tasks: our work constructs a structured taxonomy that categorizes existing research into three primary groups (Figure 2) and offers a forward-looking analysis of the latest breakthroughs, thereby underscoring our unique contributions and the significance of our approach in advancing the field. (2) A comprehensive review. Building on the proposed taxonomy, we systematically review the current research progress on LLMs for spatial reasoning tasks. (3) Future directions. We highlight the remaining limitations of existing works and suggest potential directions for future research." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 51, + 445, + 134, + 458 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 445, + 134, + 458 + ], + "spans": [ + { + "bbox": [ + 51, + 445, + 134, + 458 + ], + "type": "text", + "content": "2 Preliminary" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 51, + 460, + 192, + 472 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 460, + 192, + 472 + ], + "spans": [ + { + "bbox": [ + 51, + 460, + 192, + 472 + ], + "type": "text", + "content": "2.1 Large Language Models" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 50, + 474, + 298, + 562 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 474, + 298, + 562 + ], + "spans": [ + { + "bbox": [ + 50, + 474, + 298, + 562 + ], + "type": "text", + "content": "Large Language Models (LLMs) have evolved from early word embeddings to context-aware models like BERT [Kenton and Toutanova, 2019]. Generative transformers such as GPT series [Radford, 2018], have further advanced text generation and few-shot learning. However, these models often struggle with spatial reasoning due to their focus on textual patterns, prompting efforts to integrate external spatial knowledge [Fu et al., 2024]." 
+ } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 50, + 562, + 298, + 662 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 562, + 298, + 662 + ], + "spans": [ + { + "bbox": [ + 50, + 562, + 298, + 662 + ], + "type": "text", + "content": "Vision-Language Models (VLMs) extend LLMs by aligning visual data with text. Early examples like CLIP [Radford et al., 2021] leverage co-attentional architectures and contrastive learning, while later models such as BLIP [Li et al., 2022] refine these techniques with larger datasets. Yet, most VLMs process only 2D data, limiting their ability to capture detailed 3D spatial configurations. Integrating 3D context via depth maps, point clouds, or voxels remains challenging, motivating ongoing research toward more robust spatial intelligence." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 51, + 668, + 169, + 679 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 668, + 169, + 679 + ], + "spans": [ + { + "bbox": [ + 51, + 668, + 169, + 679 + ], + "type": "text", + "content": "2.2 3D Data Structures" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 51, + 681, + 298, + 704 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 681, + 298, + 704 + ], + "spans": [ + { + "bbox": [ + 51, + 681, + 298, + 704 + ], + "type": "text", + "content": "3D data has different structures, which are essential for understanding the three-dimensional world, and common methods" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 310, + 54, + 559, + 99 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 54, + 559, + 99 + ], + "spans": [ + { + "bbox": [ + 310, + 54, + 559, + 99 + ], + "type": "text", + "content": "include point clouds, voxel grids, polygonal meshes, neural fields, hybrid representations, and 3D Gaussian splatting. 
Point clouds represent shapes using discrete points, typically denoted as" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 370, + 102, + 499, + 118 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 370, + 102, + 499, + 118 + ], + "spans": [ + { + "bbox": [ + 370, + 102, + 499, + 118 + ], + "type": "interline_equation", + "content": "P = \\left\\{p _ {i} \\in \\mathbb {R} ^ {3} \\mid i = 1, \\dots , N \\right\\},", + "image_path": "1bf7f57265e65d413e42b9341fc93ceead1023f71cc6b80ce679fd74fcaec139.jpg" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 311, + 121, + 558, + 232 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 311, + 121, + 558, + 232 + ], + "spans": [ + { + "bbox": [ + 311, + 121, + 558, + 232 + ], + "type": "text", + "content": "which are storage-efficient but lack surface topology. Voxel grids partition space into uniform cubes, with each voxel " + }, + { + "bbox": [ + 311, + 121, + 558, + 232 + ], + "type": "inline_equation", + "content": "V(i,j,k)" + }, + { + "bbox": [ + 311, + 121, + 558, + 232 + ], + "type": "text", + "content": " storing occupancy or distance values, providing detailed structure at the expense of increased memory usage at higher resolutions. Polygonal meshes compactly encode complex geometries through a set of vertices " + }, + { + "bbox": [ + 311, + 121, + 558, + 232 + ], + "type": "inline_equation", + "content": "\\{v_{i}\\}" + }, + { + "bbox": [ + 311, + 121, + 558, + 232 + ], + "type": "text", + "content": " and faces " + }, + { + "bbox": [ + 311, + 121, + 558, + 232 + ], + "type": "inline_equation", + "content": "\\{F_j\\}" + }, + { + "bbox": [ + 311, + 121, + 558, + 232 + ], + "type": "text", + "content": ", though their unstructured and non-differentiable nature poses challenges for integration with neural networks. 
Neural fields offer an implicit approach by modeling 3D shapes as continuous and differentiable functions, such as" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 398, + 236, + 471, + 249 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 398, + 236, + 471, + 249 + ], + "spans": [ + { + "bbox": [ + 398, + 236, + 471, + 249 + ], + "type": "interline_equation", + "content": "f _ {\\theta}: \\mathbb {R} ^ {3} \\rightarrow (c, \\sigma),", + "image_path": "e0e4442a4b46313dc44cf0f5c9bff8b0f952923527a0c8287f8b90fc3e5eaf75.jpg" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 310, + 253, + 559, + 364 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 253, + 559, + 364 + ], + "spans": [ + { + "bbox": [ + 310, + 253, + 559, + 364 + ], + "type": "text", + "content": "which maps spatial coordinates to color " + }, + { + "bbox": [ + 310, + 253, + 559, + 364 + ], + "type": "inline_equation", + "content": "c" + }, + { + "bbox": [ + 310, + 253, + 559, + 364 + ], + "type": "text", + "content": " and density " + }, + { + "bbox": [ + 310, + 253, + 559, + 364 + ], + "type": "inline_equation", + "content": "\\sigma" + }, + { + "bbox": [ + 310, + 253, + 559, + 364 + ], + "type": "text", + "content": ". Hybrid representations combine these neural fields with traditional volumetric methods (e.g., integrating " + }, + { + "bbox": [ + 310, + 253, + 559, + 364 + ], + "type": "inline_equation", + "content": "f_{\\theta}" + }, + { + "bbox": [ + 310, + 253, + 559, + 364 + ], + "type": "text", + "content": " with voxel grids) to achieve high-quality, real-time rendering. 
Meanwhile, 3D Gaussian splatting enhances point clouds by associating each point " + }, + { + "bbox": [ + 310, + 253, + 559, + 364 + ], + "type": "inline_equation", + "content": "p_i" + }, + { + "bbox": [ + 310, + 253, + 559, + 364 + ], + "type": "text", + "content": " with a covariance matrix " + }, + { + "bbox": [ + 310, + 253, + 559, + 364 + ], + "type": "inline_equation", + "content": "\\Sigma_i" + }, + { + "bbox": [ + 310, + 253, + 559, + 364 + ], + "type": "text", + "content": " and color " + }, + { + "bbox": [ + 310, + 253, + 559, + 364 + ], + "type": "inline_equation", + "content": "c_i" + }, + { + "bbox": [ + 310, + 253, + 559, + 364 + ], + "type": "text", + "content": ", efficiently encoding radiance information for rendering. Each method has its unique strengths and trade-offs, making them suitable for different applications in 3D understanding and generation." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 311, + 371, + 432, + 384 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 311, + 371, + 432, + 384 + ], + "spans": [ + { + "bbox": [ + 311, + 371, + 432, + 384 + ], + "type": "text", + "content": "2.3 Proposed taxonomy" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 310, + 386, + 559, + 671 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 386, + 559, + 671 + ], + "spans": [ + { + "bbox": [ + 310, + 386, + 559, + 671 + ], + "type": "text", + "content": "We propose a taxonomy that classifies 3D-LLM research into three main categories based on input modalities and integration strategies, as shown in Figure 1: Image-based spatial reasoning encompasses approaches that derive 3D understanding from 2D images. This includes multi-view methods that reconstruct 3D scenes, RGB-D images providing explicit depth information, monocular 3D perception inferring depth from single views, and medical imaging applications. 
While these approaches benefit from readily available image data and existing vision models, they may struggle with occlusions and viewpoint limitations. Point cloud-based spatial reasoning works directly with 3D point cloud data through three alignment strategies: (1) Direct alignment that immediately connects point features with language embeddings, (2) Step-by-step alignment that follows sequential stages to bridge modalities, and (3) Task-specific alignment customized for particular spatial reasoning requirements. These methods maintain geometric fidelity but face challenges in handling unstructured 3D data. Hybrid modality-based spatial reasoning combines multiple data streams through either tightly or loosely coupled architectures. Tightly coupled approaches integrate modalities through shared embeddings or end-to-end training, while loosely coupled methods maintain modular components with defined interfaces between them. This enables leveraging complementary strengths across modalities but increases architectural complexity." 
+ } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 310, + 670, + 559, + 704 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 670, + 559, + 704 + ], + "spans": [ + { + "bbox": [ + 310, + 670, + 559, + 704 + ], + "type": "text", + "content": "This taxonomy provides a structured framework for understanding the diverse technical approaches in the field while highlighting the distinct challenges and trade-offs each branch" + } + ] + } + ], + "index": 16 + } + ], + "discarded_blocks": [], + "page_size": [ + 612, + 792 + ], + "page_idx": 1 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 57, + 52, + 553, + 255 + ], + "blocks": [ + { + "bbox": [ + 57, + 52, + 553, + 255 + ], + "lines": [ + { + "bbox": [ + 57, + 52, + 553, + 255 + ], + "spans": [ + { + "bbox": [ + 57, + 52, + 553, + 255 + ], + "type": "image", + "image_path": "10f4e27138d77cef1e66632497ab60fcb460eb82533892d1e5d74ab2bb75012d.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 50, + 261, + 558, + 282 + ], + "lines": [ + { + "bbox": [ + 50, + 261, + 558, + 282 + ], + "spans": [ + { + "bbox": [ + 50, + 261, + 558, + 282 + ], + "type": "text", + "content": "Figure 2: A Taxonomy of Models for Spatial Reasoning with LLMs: Image-based, Point Cloud-based, and Hybrid Modality-based Approaches and Their Subdivisions." 
+ } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "type": "image", + "bbox": [ + 53, + 300, + 298, + 459 + ], + "blocks": [ + { + "bbox": [ + 53, + 300, + 298, + 459 + ], + "lines": [ + { + "bbox": [ + 53, + 300, + 298, + 459 + ], + "spans": [ + { + "bbox": [ + 53, + 300, + 298, + 459 + ], + "type": "image", + "image_path": "f869e9459876e49023165cdcf85439fb6449fd17975fe58ef81ce28fb4e6e702.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 80, + 468, + 268, + 480 + ], + "lines": [ + { + "bbox": [ + 80, + 468, + 268, + 480 + ], + "spans": [ + { + "bbox": [ + 80, + 468, + 268, + 480 + ], + "type": "text", + "content": "Figure 3: An overview of image-based approaches." + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_caption" + } + ], + "index": 2 + }, + { + "bbox": [ + 50, + 498, + 299, + 521 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 498, + 299, + 521 + ], + "spans": [ + { + "bbox": [ + 50, + 498, + 299, + 521 + ], + "type": "text", + "content": "must address. Figure 2 presents a detailed breakdown of representative works in each category." 
+ } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 51, + 533, + 281, + 559 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 533, + 281, + 559 + ], + "spans": [ + { + "bbox": [ + 51, + 533, + 281, + 559 + ], + "type": "text", + "content": "3 Recent Advances of Spatial Reasoning in LLM" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 51, + 566, + 226, + 580 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 566, + 226, + 580 + ], + "spans": [ + { + "bbox": [ + 51, + 566, + 226, + 580 + ], + "type": "text", + "content": "3.1 Image-based Spatial Reasoning" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 50, + 582, + 299, + 704 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 582, + 299, + 704 + ], + "spans": [ + { + "bbox": [ + 50, + 582, + 299, + 704 + ], + "type": "text", + "content": "Image-based spatial reasoning methods can be categorized based on their input modalities: multi-view images, monocular images, RGB-D images, and 3D medical images shown in Figure 3. Each modality offers unique advantages for enhancing 3D understanding in Large Language Models (LLMs). Multi-view images provide spatial data from different perspectives, monocular images extract 3D insights from a single view, RGB-D images incorporate depth information, and 3D medical images address domain-specific challenges in healthcare. These categories highlight the strengths and challenges of each approach in improving spatial reasoning capabilities." 
+ } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 311, + 301, + 462, + 313 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 311, + 301, + 462, + 313 + ], + "spans": [ + { + "bbox": [ + 311, + 301, + 462, + 313 + ], + "type": "text", + "content": "3.1.1 Multi-view Images as input" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 310, + 314, + 560, + 654 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 314, + 560, + 654 + ], + "spans": [ + { + "bbox": [ + 310, + 314, + 560, + 654 + ], + "type": "text", + "content": "Several studies explore multi-view images to enhance LLMs' spatial understanding. LLaVA-3D Zhu et al. [2024b] leverages multi-view images and 3D positional embeddings to create 3D Patches, achieving state-of-the-art 3D spatial understanding while maintaining 2D image understanding capabilities. Agent3D-Zero Zhang et al. [2024] utilizes multiple images from different viewpoints, enabling VLMs to perform robust reasoning and understand spatial relationships, achieving zero-shot scene understanding. ShapeLLM Qi et al. [2024a] also uses multi-view image input, with robustness to occlusions. Scene-LLM Fu et al. [2024] uses multi-view images to build 3D feature representations, incorporating scene-level and egocentric 3D information to support interactive planning. SpatialPIN Ma et al. [2024a] enhances VLM's spatial reasoning by decomposing, understanding and reconstructing explicit 3D representations from multi-view images and generalizes to various 3D tasks. LLMI3D Yang et al. [2024] extracts spatially enhanced local features from high-resolution images using CNNs and a depth predictor and uses ViT to obtain tokens from low-resolution images. It employs a spatially enhanced cross-branch attention mechanism to effectively mine spatial local features of objects and uses geometric projection to handle. 
Extracting multi-view features results in huge computational overhead and ignores the essential geometry and depth information. Additionally, plain texts often lead to ambiguities especially in cluttered and complex 3D environmentsChen et al. [2024c]. ConceptGraphs Gu et al. [2024] proposes a graph-structured representation for 3D scenes that operates with an open vocabulary, which is developed by utilizing 2D foundation models and integrating their outputs into a 3D format through multiview association." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 311, + 658, + 458, + 670 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 311, + 658, + 458, + 670 + ], + "spans": [ + { + "bbox": [ + 311, + 658, + 458, + 670 + ], + "type": "text", + "content": "3.1.2 Monocular Image as input" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 310, + 670, + 559, + 704 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 670, + 559, + 704 + ], + "spans": [ + { + "bbox": [ + 310, + 670, + 559, + 704 + ], + "type": "text", + "content": "LLMI3D Yang et al. [2024] uses a single 2D image for 3D perception, enhancing performance through spatial local feature mining, 3D query token decoding, and geometry-based" + } + ] + } + ], + "index": 11 + } + ], + "discarded_blocks": [], + "page_size": [ + 612, + 792 + ], + "page_idx": 2 + }, + { + "para_blocks": [ + { + "bbox": [ + 50, + 54, + 298, + 110 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 54, + 298, + 110 + ], + "spans": [ + { + "bbox": [ + 50, + 54, + 298, + 110 + ], + "type": "text", + "content": "3D reasoning. It uses a depth predictor and CNN to extract spatial local features and uses learnable 3D query tokens for geometric coordinate regression. It combines black-box networks and white-box projection to address changes in camera focal lengths." 
+ } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 51, + 115, + 183, + 127 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 115, + 183, + 127 + ], + "spans": [ + { + "bbox": [ + 51, + 115, + 183, + 127 + ], + "type": "text", + "content": "3.1.3 RGB-D Image as Input" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 50, + 128, + 298, + 204 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 128, + 298, + 204 + ], + "spans": [ + { + "bbox": [ + 50, + 128, + 298, + 204 + ], + "type": "text", + "content": "Depth is estimated in SpatialPIN Ma et al. [2024a] by ZoeDepth when finding field of view (FOV) through perspective fields and provided for 3D-scene understanding and reconstruction. M3D-LaMed Bai et al. [2024] pre-trains the 3D medical vision encoder with medical image slices along depth and introduces end-to-end tuning to integrate 3D information into LLM." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 51, + 210, + 199, + 222 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 210, + 199, + 222 + ], + "spans": [ + { + "bbox": [ + 51, + 210, + 199, + 222 + ], + "type": "text", + "content": "3.1.4 3D Medical Image as input" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 50, + 222, + 299, + 453 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 222, + 299, + 453 + ], + "spans": [ + { + "bbox": [ + 50, + 222, + 299, + 453 + ], + "type": "text", + "content": "Unlike previous research focused on 2D medical images, integrating multi-modal other information such as textual descriptions, M3D-LaMed Bai et al. [2024] is specifically designed for 3D CT images by analyzing spatial features. It demonstrates excellent performance across multiple tasks, including image-text retrieval, report generation, visual question answering, localization, and segmentation. In order to generate radiology reports automatically, a brand-new framework Liu et al. 
[2024a] is proposed to employ low-resolution (LR) visual tokens as queries to extract information from high-resolution (HR) tokens, ensuring that detailed information is retained across HR volumes while minimizing computational costs by processing only the HR-informed LR visual queries. 3D-CT-GPT Chen et al. [2024b], a medical visual language model, is tailored for the generation of radiology reports from 3D CT scans, with a focus on chest CTs. OpenMEDLab Wang et al. [2024] comprises and publishes a variety of medical foundation models to process multi-modal medical data including Color Fundus Photography (CFP), Optical Coherence Tomography (OCT), endoscopy videos, CT&MR volumes and other pathology images." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 51, + 459, + 129, + 469 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 459, + 129, + 469 + ], + "spans": [ + { + "bbox": [ + 51, + 459, + 129, + 469 + ], + "type": "text", + "content": "3.1.5 Discussion" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 50, + 471, + 298, + 591 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 471, + 298, + 591 + ], + "spans": [ + { + "bbox": [ + 50, + 471, + 298, + 591 + ], + "type": "text", + "content": "Image-based spatial reasoning methods offer significant advantages, such as easy data acquisition and integration with pre-trained 2D models. Multi-view images provide rich spatial information, while depth estimation enhances scene understanding. However, challenges remain, including limited depth from single views, scale uncertainty, occlusion, and viewpoint dependency. These methods also face issues with visual hallucinations, generalization to novel scenes, and high computational costs. Future research should focus on improving multi-view integration and depth estimation to address these limitations." 
+ } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 51, + 600, + 295, + 624 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 600, + 295, + 624 + ], + "spans": [ + { + "bbox": [ + 51, + 600, + 295, + 624 + ], + "type": "text", + "content": "3.2 Recent Advances of Point Cloud-based Spatial Reasoning" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 50, + 626, + 298, + 704 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 626, + 298, + 704 + ], + "spans": [ + { + "bbox": [ + 50, + 626, + 298, + 704 + ], + "type": "text", + "content": "As shown in Figure 4, point cloud-based spatial reasoning has advanced significantly in recent years, employing three main alignment methods: Direct, Step-by-step, and Task-specific Alignment. These methods are essential for integrating point cloud data with language models to enable effective spatial reasoning. Direct Alignment establishes immediate connections between point cloud features and language model em" + } + ] + } + ], + "index": 8 + }, + { + "type": "image", + "bbox": [ + 317, + 53, + 554, + 194 + ], + "blocks": [ + { + "bbox": [ + 317, + 53, + 554, + 194 + ], + "lines": [ + { + "bbox": [ + 317, + 53, + 554, + 194 + ], + "spans": [ + { + "bbox": [ + 317, + 53, + 554, + 194 + ], + "type": "image", + "image_path": "01a7846f1deba904180c76b95b69058c7981b82fbb85fd782f363da73b4c3476.jpg" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 332, + 203, + 537, + 214 + ], + "lines": [ + { + "bbox": [ + 332, + 203, + 537, + 214 + ], + "spans": [ + { + "bbox": [ + 332, + 203, + 537, + 214 + ], + "type": "text", + "content": "Figure 4: An overview of point cloud-based approaches." 
+ } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_caption" + } + ], + "index": 9 + }, + { + "bbox": [ + 310, + 230, + 558, + 285 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 230, + 558, + 285 + ], + "spans": [ + { + "bbox": [ + 310, + 230, + 558, + 285 + ], + "type": "text", + "content": "beddings, while Step-by-step Alignment follows a sequential process through multiple stages. Task-specific Alignment is customized for particular spatial reasoning requirements. The choice of method depends on specific application needs and constraints." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 311, + 291, + 419, + 302 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 311, + 291, + 419, + 302 + ], + "spans": [ + { + "bbox": [ + 311, + 291, + 419, + 302 + ], + "type": "text", + "content": "3.2.1 Direct Alignment" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 310, + 303, + 559, + 544 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 303, + 559, + 544 + ], + "spans": [ + { + "bbox": [ + 310, + 303, + 559, + 544 + ], + "type": "text", + "content": "Direct alignment methods create direct connections between point cloud data and language models. PointCLIP [Zhang et al., 2022] was a pioneer, projecting point clouds into multiview depth maps and using CLIP's pre-trained visual encoder for feature extraction, which was then aligned with textual features through a hand-crafted template. This approach showed promising results in zero-shot and few-shot classification tasks by transferring 2D knowledge to the 3D domain. PointCLIP V2 [Zhu et al., 2023] improved the projection quality with a realistic projection module and used GPT-3 for generating 3D-specific text descriptions, achieving better performance in zero-shot classification, part segmentation, and object detection. 
Chat-Scene [Huang et al., 2024] introduced object identifiers to facilitate object referencing during user-assistant interactions, representing scenes through object-centric embeddings. PointLLM [Xu et al., 2025] advanced the field by integrating a point cloud encoder with a powerful LLM, effectively fusing geometric, appearance, and linguistic information, and overcoming data scarcity with automated generation. These methods demonstrate the potential for effective 3D point cloud understanding through language models, enabling improved spatial reasoning and human-AI interaction." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 311, + 549, + 446, + 560 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 311, + 549, + 446, + 560 + ], + "spans": [ + { + "bbox": [ + 311, + 549, + 446, + 560 + ], + "type": "text", + "content": "3.2.2 Step-by-step Alignment" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 310, + 561, + 559, + 704 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 561, + 559, + 704 + ], + "spans": [ + { + "bbox": [ + 310, + 561, + 559, + 704 + ], + "type": "text", + "content": "Step-by-step alignment has gained popularity in integrating point cloud features with language models. Notable approaches include GPT4Point [Qi et al., 2024b], which uses a Bert-based Point-QFormer for point-text feature alignment, followed by object generation. Grounded 3D-LLMs [Chen et al., 2024d] first aligns 3D scene embeddings with textual descriptions via contrastive pre-training, then fine-tunes with referent tokens. LiDAR-LLMs [Yang et al., 2023] employ a three-stage process: cross-modal alignment, object-centric learning, and high-level instruction fine-tuning. MiniGPT-3D [Tang et al., 2024a] follows a four-stage strategy, from point cloud projection to advanced model enhancements using Mixture of Query Experts. 
GreenPLM [Tang et al., 2024b] uses" + } + ] + } + ], + "index": 15 + } + ], + "discarded_blocks": [], + "page_size": [ + 612, + 792 + ], + "page_idx": 3 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 53, + 53, + 294, + 159 + ], + "blocks": [ + { + "bbox": [ + 53, + 53, + 294, + 159 + ], + "lines": [ + { + "bbox": [ + 53, + 53, + 294, + 159 + ], + "spans": [ + { + "bbox": [ + 53, + 53, + 294, + 159 + ], + "type": "image", + "image_path": "6f283c8ccff38019b629c7d3baf89d8f01eae9a6757bd3db846940b8dbae1d64.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 63, + 167, + 285, + 178 + ], + "lines": [ + { + "bbox": [ + 63, + 167, + 285, + 178 + ], + "spans": [ + { + "bbox": [ + 63, + 167, + 285, + 178 + ], + "type": "text", + "content": "Figure 5: An overview of hybrid modality-based approaches." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "bbox": [ + 50, + 194, + 299, + 250 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 194, + 299, + 250 + ], + "spans": [ + { + "bbox": [ + 50, + 194, + 299, + 250 + ], + "type": "text", + "content": "a three-stage method that aligns a text encoder with an LLM using large text data, followed by point-LLM alignment with 3D data. These step-by-step approaches highlight the gradual improvement of spatial reasoning in 3D contexts, offering valuable insights for future research." 
+ } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 51, + 254, + 186, + 265 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 254, + 186, + 265 + ], + "spans": [ + { + "bbox": [ + 51, + 254, + 186, + 265 + ], + "type": "text", + "content": "3.2.3 Task-specific Alignment" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 50, + 265, + 299, + 462 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 265, + 299, + 462 + ], + "spans": [ + { + "bbox": [ + 50, + 265, + 299, + 462 + ], + "type": "text", + "content": "Task-specific alignment customizes models for specific spatial reasoning tasks to improve performance and generalization. SceneVerse [Jia et al., 2024] introduces a large 3D vision-language dataset and Grounded Pre-training for Scenes (GPS), using multi-level contrastive alignment for unified scene-text alignment, achieving state-of-the-art results in tasks like 3D visual grounding and question answering. LL3DA [Chen et al., 2024c] presents a dialogue system that integrates textual instructions and visual interactions, excelling in complex 3D environments. Chat-3D [Wang et al., 2023] proposes a three-stage training scheme to align 3D scene representations with language models, capturing spatial relations with limited data. VisProg [Yuan et al., 2024] introduces visual programming for zero-shot open-vocabulary 3D grounding, leveraging LLMs to generate and execute programmatic representations. These task-specific approaches highlight the importance of adapting models to complex spatial relationships, enabling robust performance even with limited data or zero-shot tasks." 
+ } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 51, + 468, + 129, + 478 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 468, + 129, + 478 + ], + "spans": [ + { + "bbox": [ + 51, + 468, + 129, + 478 + ], + "type": "text", + "content": "3.2.4 Discussion" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 50, + 479, + 299, + 558 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 479, + 299, + 558 + ], + "spans": [ + { + "bbox": [ + 50, + 479, + 299, + 558 + ], + "type": "text", + "content": "The three alignment approaches—Direct, Step-by-step, and Task-specific—each have distinct strengths and challenges. Direct alignment offers efficiency and quick results but struggles with complex spatial relationships. Step-by-step alignment improves feature integration at the cost of higher computational resources and training time. Task-specific alignment excels in specialized tasks but may lack broader applicability." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 51, + 563, + 275, + 575 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 563, + 275, + 575 + ], + "spans": [ + { + "bbox": [ + 51, + 563, + 275, + 575 + ], + "type": "text", + "content": "3.3 Hybrid Modality-based Spatial Reasoning" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 50, + 577, + 298, + 665 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 577, + 298, + 665 + ], + "spans": [ + { + "bbox": [ + 50, + 577, + 298, + 665 + ], + "type": "text", + "content": "Hybrid modality-based spatial reasoning integrates point clouds, images, and LLMs through Tightly Coupled and Loosely Coupled approaches, as shown in Figure 5. 
The Tightly Coupled approach fosters close integration, enabling seamless interaction and high performance, while the Loosely Coupled approach promotes modularity, allowing independent operation of components for greater scalability and flexibility at the cost of reduced real-time interaction." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 51, + 670, + 154, + 681 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 670, + 154, + 681 + ], + "spans": [ + { + "bbox": [ + 51, + 670, + 154, + 681 + ], + "type": "text", + "content": "3.3.1 Tightly Coupled" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 50, + 681, + 299, + 704 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 681, + 299, + 704 + ], + "spans": [ + { + "bbox": [ + 50, + 681, + 299, + 704 + ], + "type": "text", + "content": "Several recent works have explored tightly integrated approaches for spatial reasoning across point clouds, images and" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 310, + 54, + 560, + 341 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 54, + 560, + 341 + ], + "spans": [ + { + "bbox": [ + 310, + 54, + 560, + 341 + ], + "type": "text", + "content": "language modalities: Point-Bind [Guo et al., 2023] proposes a joint embedding space to align point clouds with images and text through contrastive learning. It leverages ImageBind to construct unified representations that enable tasks like zero-shot classification, open-world understanding and multi-modal generation. The tight coupling allows Point-Bind to reason about point clouds using both visual and linguistic cues. JM3D [Ji et al., 2024] introduces a Structured Multimodal Organizer that tightly fuses multi-view images and hierarchical text trees with point clouds. This coupled architecture enables detailed spatial understanding by leveraging complementary information across modalities. 
The Joint Multi-modal Alignment further enhances the synergistic relationships between visual and linguistic features. Uni3D [Zhou et al., 2023] employs a unified transformer architecture that directly aligns point cloud features with image-text representations. By tightly coupling the modalities through end-to-end training, it achieves strong performance on tasks like zero-shot classification and open-world understanding. The shared backbone enables efficient scaling to billion-parameter models. Uni3D-LLM [Liu et al., 2024b] extends this tight coupling to LLMs through an LLM-to-Generator mapping block. This enables unified perception, generation and editing of point clouds guided by natural language. The tight integration allows leveraging rich semantic knowledge from LLMs while maintaining high-quality 3D understanding." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 312, + 345, + 417, + 357 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 312, + 345, + 417, + 357 + ], + "spans": [ + { + "bbox": [ + 312, + 345, + 417, + 357 + ], + "type": "text", + "content": "3.3.2 Loosely Coupled" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 310, + 357, + 559, + 621 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 357, + 559, + 621 + ], + "spans": [ + { + "bbox": [ + 310, + 357, + 559, + 621 + ], + "type": "text", + "content": "Loosely coupled approaches maintain greater independence between different modalities while still enabling interaction through well-defined interfaces. MultiPLY [Hong et al., 2024] proposes a multisensory embodied LLM that handles multiple input modalities (visual, audio, tactile, thermal) through separate encoders. The modalities are processed independently and communicate through action tokens and state tokens. 
This decoupled design allows the system to process each modality with specialized encoders optimized for that data type, while enabling scalability and modularity in the system architecture. Similarly, UniPoint-LLM [Liu et al.] introduces a Multimodal Universal Token Space (MUTS) that loosely connects point clouds and images through independent encoders and a shared mapping layer. This modular design allows easy integration of new modalities and simplified training by only requiring alignment between new modalities and text, rather than pairwise alignment between all modalities. The main benefits of loosely coupled architectures include greater modularity and flexibility in system design, easier integration of new modalities, and independent scaling of different components. However, this approach may result in less optimal joint representation learning, reduced real-time interaction capabilities, and potential information loss between modalities compared to tightly coupled approaches." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 312, + 626, + 390, + 636 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 312, + 626, + 390, + 636 + ], + "spans": [ + { + "bbox": [ + 312, + 626, + 390, + 636 + ], + "type": "text", + "content": "3.3.3 Discussion" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 310, + 638, + 559, + 704 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 638, + 559, + 704 + ], + "spans": [ + { + "bbox": [ + 310, + 638, + 559, + 704 + ], + "type": "text", + "content": "The choice between tightly and loosely coupled approaches presents important tradeoffs in multimodal spatial reasoning systems. Tightly coupled approaches like Point-Bind and JM3D offer stronger joint representation learning and real-time interaction capabilities through end-to-end training and shared feature spaces. 
This makes them particularly suitable" + } + ] + } + ], + "index": 15 + } + ], + "discarded_blocks": [], + "page_size": [ + 612, + 792 + ], + "page_idx": 4 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 53, + 52, + 558, + 313 + ], + "blocks": [ + { + "bbox": [ + 53, + 52, + 558, + 313 + ], + "lines": [ + { + "bbox": [ + 53, + 52, + 558, + 313 + ], + "spans": [ + { + "bbox": [ + 53, + 52, + 558, + 313 + ], + "type": "table", + "html": "
ModelData SourceAlignment TypePre-trainingFine-tuningTaskCode
Image - basedLLaVA-3D [Zhu et al., 2024b]Multi-view Images-3D VQA, 3D Scene Understandingcode
Agent3D-Zero [Zhang et al., 2024]Multi-view Images-3D VQA, 3D Semantic Segmentation
ShapeLLM [Qi et al., 2024a]Multi-view Images-3D Object Classification, 3D Scene Captioningcode
Scene-LLM [Fu et al., 2024]Multi-view Images-3D VQA, Dense Captioning
SpatialPIN [Ma et al., 2024a]RGB-D Images-3D Motion Planning, Task Video Generation
LLMI3D [Yang et al., 2024]Monocular Images-3D Grounding, 3D VQA
Spatialvm [Chen et al., 2024a]Monocular Images-Dense Reward Annotator, Spatial Data Generationcode
M3D-LaMed [Bai et al., 2024]Medical Images-3D VQA, 3D VLPcode
HILT [Liu et al., 2024a]Medical Images-3DHRG
3D-CT-GPT [Chen et al., 2024b]Medical Images-Radiology Report Generation, 3D VQA
OpenMEDLab [Wang et al., 2024]Medical Images-Medical Imagingcode
Point Cloud - basedPointLLM [Xu et al., 2025]Point CloudDirect Alignment3D Object Classification, 3D Object Captioningcode
Chat-Scene [Huang et al., 2024]Point CloudDirect Alignment3D Visual Grounding, 3D Scene Captioningcode
PointCLIP [Zhang et al., 2022]Point CloudDirect Alignment3D Point Cloud Classificationcode
PointCLIPv2 [Zhu et al., 2023]Point CloudDirect Alignment3D Point Cloud Classificationcode
GPT4Point [Qi et al., 2024b]Point CloudStep-by-step Alignment3D Object Understandingcode
MiniGPT-3D [Tang et al., 2024a]Point CloudStep-by-step Alignment3D Object Classification, 3D Object Captioningcode
GreenPLM [Tang et al., 2024b]Point CloudStep-by-step Alignment3D Object Classificationcode
Grounded 3D-LLM [Chen et al., 2024d]Point CloudStep-by-step Alignment3D Object Detection, 3D VQAcode
Lidar-LLM [Yang et al., 2023]Point CloudStep-by-step Alignment3D Captioning, 3D Groundingcode
3D-LLaVA [Deng et al., 2025]Point CloudTask-specific Alignment3D VQA, 3D Captioningcode
ScanReason [Zhu et al., 2024a]Point CloudTask-specific Alignment3D Reasoning Groundingcode
SegPoint [He et al., 2024]Point CloudTask-specific Alignment3D Instruction Segmentation
Kestrel [Fei et al., 2024]Point CloudTask-specific AlignmentPart-Aware Point Grounding
SIG3D [Man et al., 2024]Point CloudTask-specific AlignmentSituation Estimationcode
Chat-3D [Wang et al., 2023]Point CloudTask-specific Alignment3D VQAcode
LL3DA [Chen et al., 2024c]Point CloudTask-specific Alignment3D Dense Captioningcode
Hybrid - basedPoint-bind [Guo et al., 2023]Point cloud, ImageTightly Coupled3D Cross-modal Retrieval, Any-to-3D Generationcode
JM3D [Ji et al., 2024]Point cloud, ImageTightly CoupledImage-3D Retrieval, 3D Part Segmentationcode
Uni3D [Zhou et al., 2023]Point cloud, ImageTightly CoupledZero-shot Shape Classificationcode
Uni3D-LLM [Liu et al., 2024b]Point cloud, ImageTightly Coupled3D VQA
MultiPLY [Hong et al., 2024]Point cloud, ImageLoosely CoupledObject retrievalcode
UniPoint-LLM [Liu et al.]Point cloud, ImageLoosely Coupled3D generation, 3D VQA
", + "image_path": "d940e65bdb4924227a21274aee6d75d1ec4fbdde1933224aff9d92e6f49a75fb.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "table_body" + } + ], + "index": 0 + }, + { + "bbox": [ + 50, + 320, + 559, + 363 + ], + "lines": [ + { + "bbox": [ + 50, + 320, + 559, + 363 + ], + "spans": [ + { + "bbox": [ + 50, + 320, + 559, + 363 + ], + "type": "text", + "content": "Table 1: Taxonomy of Large Language Models with spatial reasoning capability. This table presents a comprehensive comparison of various 3D vision-language models categorized by their input modalities (image-based, point cloud-based, and hybrid-based), showing their data sources, alignment types, training strategies (pre-training and fine-tuning), primary tasks, and code availability. The models are organized into three main categories based on their input type: image-based models, point cloud-based models, and hybrid models that utilize both modalities." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "text" + }, + { + "bbox": [ + 50, + 380, + 298, + 568 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 380, + 298, + 568 + ], + "spans": [ + { + "bbox": [ + 50, + 380, + 298, + 568 + ], + "type": "text", + "content": "for applications requiring detailed spatial understanding and precise control. However, they can be more complex to train and scale, and adding new modalities may require significant architectural changes. In contrast, loosely coupled approaches like MultiPLY and UniPoint-LLM provide greater modularity and flexibility, making them easier to extend and maintain. They allow independent optimization of different components and simplified training procedures, but may sacrifice some performance in tasks requiring fine-grained cross-modal understanding. 
The optimal choice ultimately depends on specific application requirements - tightly coupled architectures may be preferred for specialized high-performance systems, while loosely coupled designs better suit general-purpose platforms prioritizing extensibility and maintainability. Future work may explore hybrid approaches that combine the benefits of both paradigms, potentially using adaptive coupling mechanisms that adjust based on task demands." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 51, + 582, + 137, + 596 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 582, + 137, + 596 + ], + "spans": [ + { + "bbox": [ + 51, + 582, + 137, + 596 + ], + "type": "text", + "content": "4 Applications" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 50, + 600, + 298, + 678 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 600, + 298, + 678 + ], + "spans": [ + { + "bbox": [ + 50, + 600, + 298, + 678 + ], + "type": "text", + "content": "A key research focus leverages LLMs to enhance robotic embodied intelligence, enabling machines to interpret natural language commands for real-world tasks. This includes robotic control, navigation, and manipulation, where LLMs parse instructions, generate action plans, and adapt to dynamic environments—for instance, guiding robots to locate objects in cluttered spaces using text-based prompts." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 51, + 681, + 299, + 706 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 681, + 299, + 706 + ], + "spans": [ + { + "bbox": [ + 51, + 681, + 299, + 706 + ], + "type": "text", + "content": "3D Scene Understanding. 
Advanced 3D scene analysis integrates multimodal data (e.g., images, point clouds, text) for" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 310, + 380, + 559, + 449 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 380, + 559, + 449 + ], + "spans": [ + { + "bbox": [ + 310, + 380, + 559, + 449 + ], + "type": "text", + "content": "tasks like open-vocabulary segmentation, semantic mapping, and spatial reasoning. Central to this is 3D visual question answering (3D-VQA), requiring models to interpret queries about object attributes, spatial relationships, or contextual roles within scenes. Context-aware systems further account for user perspectives to deliver precise responses." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 310, + 453, + 559, + 544 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 453, + 559, + 544 + ], + "spans": [ + { + "bbox": [ + 310, + 453, + 559, + 544 + ], + "type": "text", + "content": "Cross-Domain Applications. In healthcare, LLMs analyze volumetric medical scans (e.g., CT) for lesion detection and automated diagnostics. Autonomous driving systems utilize 3D-capable LLMs to interpret traffic scenes, aiding object detection [Zha et al., 2023, 2024] and path planning. Design-oriented applications include generating indoor layouts from textual requirements, while educational tools employ interactive 3D environments to teach spatial concepts." 
+ } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 312, + 567, + 504, + 580 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 312, + 567, + 504, + 580 + ], + "spans": [ + { + "bbox": [ + 312, + 567, + 504, + 580 + ], + "type": "text", + "content": "5 Challenges and Future Directions" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 310, + 594, + 559, + 705 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 594, + 559, + 705 + ], + "spans": [ + { + "bbox": [ + 310, + 594, + 559, + 705 + ], + "type": "text", + "content": "Table 1 summarizes the models that leverage LLMs to assist graph-related tasks according to the proposed taxonomy. Based on the above review and analysis, we believe that there is still much space for further enhancement in this field. Recent advances in integrating LLMs with three-dimensional (3D) data have demonstrated considerable promise. However, numerous challenges must still be overcome to realize robust and practical 3D-aware LLMs. Below, we summarize these obstacles and then outline potential pathways to address them, highlighting key research directions for the future." + } + ] + } + ], + "index": 9 + } + ], + "discarded_blocks": [], + "page_size": [ + 612, + 792 + ], + "page_idx": 5 + }, + { + "para_blocks": [ + { + "bbox": [ + 52, + 54, + 129, + 66 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 54, + 129, + 66 + ], + "spans": [ + { + "bbox": [ + 52, + 54, + 129, + 66 + ], + "type": "text", + "content": "5.1 Challenges" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 53, + 76, + 297, + 174 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 76, + 297, + 174 + ], + "spans": [ + { + "bbox": [ + 53, + 76, + 297, + 174 + ], + "type": "text", + "content": "Weak Spatial Reasoning and Representation. 
Multimodal LLMs (MLLMs) exhibit limited acuity in 3D spatial understanding, struggling with fine-grained relationships (e.g., front/back distinctions, occluded object localization) and precise geometric outputs (distances, angles). These issues stem partly from mismatches between unstructured point clouds and sequence-based LLM architectures, where high-dimensional 3D data incur prohibitive token counts or oversimplified encodings." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 53, + 178, + 297, + 332 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 178, + 297, + 332 + ], + "spans": [ + { + "bbox": [ + 53, + 178, + 297, + 332 + ], + "type": "text", + "content": "Data and Evaluation Gaps. Progress in 3D-aware LLMs is hindered by the scarcity of high-quality 3D-text paired datasets. Unlike the abundant resources for 2D images and video, the 3D domain lacks standardized, richly annotated datasets crucial for training robust models. Existing benchmarks focus mainly on discriminative tasks like classification and retrieval—emphasizing category differentiation rather than generating rich, descriptive 3D scene outputs. Consequently, evaluations often rely on subjective metrics (e.g., human or GPT-based judgments) that can lack consistency. Advancing the field requires developing objective, comprehensive benchmarks that assess both open-vocabulary generation and the spatial plausibility of descriptions relative to the underlying 3D structure." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 53, + 337, + 297, + 513 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 337, + 297, + 513 + ], + "spans": [ + { + "bbox": [ + 53, + 337, + 297, + 513 + ], + "type": "text", + "content": "Multimodal Integration and Generalization. Fusing 3D data (e.g., point clouds) with other modalities like 2D imagery, audio, or text poses significant challenges due to their distinct structural characteristics. 
The conversion and alignment of high-dimensional 3D data with lower-dimensional representations can lead to a loss of intricate details, diluting the original 3D richness. Moreover, current models often struggle with open-vocabulary recognition, limiting their ability to identify or describe objects outside of their training data—especially when encountering unseen scenes or novel objects. This difficulty is further compounded by the variability of natural language, from colloquial expressions to domain-specific terminology, and by noisy inputs. Thus, more sophisticated multimodal integration techniques and generalization strategies are needed to preserve geometric fidelity while accommodating diverse, unpredictable inputs." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 53, + 517, + 297, + 594 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 517, + 297, + 594 + ], + "spans": [ + { + "bbox": [ + 53, + 517, + 297, + 594 + ], + "type": "text", + "content": "Complex Task Definition. While 3D-aware LLMs excel in controlled settings, they lack frameworks for nuanced language-context inference in dynamic environments. Task decomposition and scalable encoding methods are needed to balance geometric fidelity with computational tractability, particularly for interactive applications requiring real-time spatial reasoning." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 53, + 605, + 160, + 616 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 605, + 160, + 616 + ], + "spans": [ + { + "bbox": [ + 53, + 605, + 160, + 616 + ], + "type": "text", + "content": "5.2 Future Directions" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 53, + 627, + 297, + 704 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 627, + 297, + 704 + ], + "spans": [ + { + "bbox": [ + 53, + 627, + 297, + 704 + ], + "type": "text", + "content": "Enhancing 3D Perception and Representations. 
Addressing spatial reasoning gaps requires richer 3D-text datasets (e.g., from robotics, gaming, autonomous driving) and model architectures that encode geometric relationships. Multi-view data and robust depth cues can improve orientation, distance, and occlusion estimation. Compact 3D tokens and refined encoding/decoding methods may bridge unstructured point" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 314, + 55, + 558, + 76 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 55, + 558, + 76 + ], + "spans": [ + { + "bbox": [ + 314, + 55, + 558, + 76 + ], + "type": "text", + "content": "clouds with sequence-based models, enabling fine-grained spatial understanding and generation." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 314, + 80, + 558, + 213 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 80, + 558, + 213 + ], + "spans": [ + { + "bbox": [ + 314, + 80, + 558, + 213 + ], + "type": "text", + "content": "Multi-Modal Fusion and Instruction Understanding. Tighter integration of modalities (point clouds, images, text, audio) via unified latent spaces or attention mechanisms could preserve subtle geometric and semantic details. Enhanced instruction processing—including hierarchical task decomposition, contextual interpretation, and robustness to dialects/terminology—would improve compositional reasoning in 3D environments and broaden real-world applicability. Furthermore, by leveraging these integrated representations, models can more adeptly adapt to complex instructions and novel scenarios, ultimately paving the way for more robust and versatile 3D reasoning systems." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 314, + 217, + 558, + 304 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 217, + 558, + 304 + ], + "spans": [ + { + "bbox": [ + 314, + 217, + 558, + 304 + ], + "type": "text", + "content": "Cross-Scene Generalization and Robust Evaluation. 
Open-vocabulary 3D understanding demands large-scale pretraining on diverse scenes and transfer/lifelong learning paradigms for adapting to novel objects or environments. This understanding extends beyond predefined categories to generalize to unseen objects and scenes. For instance, models need to comprehend \"an old rocking chair\" even if this specific type of chair never appeared in the training data." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 314, + 308, + 558, + 385 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 308, + 558, + 385 + ], + "spans": [ + { + "bbox": [ + 314, + 308, + 558, + 385 + ], + "type": "text", + "content": "Expanding Applications for Autonomous Systems. 3D-aware LLMs hold potential in robotics (navigation, manipulation), medical imaging (lesion detection), architectural design, and interactive education. Future systems may integrate environmental constraints, user perspectives, and object affordances for autonomous planning and decision-making in dynamic 3D contexts." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 314, + 386, + 558, + 474 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 386, + 558, + 474 + ], + "spans": [ + { + "bbox": [ + 314, + 386, + 558, + 474 + ], + "type": "text", + "content": "Collectively, these challenges and potential directions underscore the field's rapid evolution and its equally significant open questions. Moving forward, more robust 3D-specific data resources, better model architectures, and more refined evaluation protocols will be essential to unlock the full potential of LLMs in three-dimensional settings—and ultimately bring intelligent, multimodal understanding closer to real-world deployment." 
+ } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 314, + 488, + 389, + 500 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 488, + 389, + 500 + ], + "spans": [ + { + "bbox": [ + 314, + 488, + 389, + 500 + ], + "type": "text", + "content": "6 Conclusion" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 314, + 506, + 558, + 704 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 506, + 558, + 704 + ], + "spans": [ + { + "bbox": [ + 314, + 506, + 558, + 704 + ], + "type": "text", + "content": "The integration of LLMs with 3D data is a dynamic research area. This survey categorized 3D-LLM research into image-based, point cloud-based, and hybrid modality-based spatial reasoning. It reviewed state-of-the-art methods, their applications in multiple fields, and associated challenges. Notably, image-based methods have data-related advantages but face issues like depth information shortage. Point cloud-based methods offer precise 3D details but encounter data-handling difficulties. Hybrid methods combine strengths yet struggle with data alignment. Applications are diverse, but challenges such as weak spatial perception, data scarcity, and evaluation problems exist. Future research should focus on enhancing 3D perception, improving multi-modal fusion, expanding generalization, developing evaluation metrics, enhancing instruction understanding, optimizing 3D representations, and exploring continuous learning. By addressing these, we can unlock the full potential of 3D-aware LLMs for real-world deployment and industry advancement." 
+ } + ] + } + ], + "index": 13 + } + ], + "discarded_blocks": [], + "page_size": [ + 612, + 792 + ], + "page_idx": 6 + }, + { + "para_blocks": [ + { + "bbox": [ + 52, + 53, + 110, + 65 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 53, + 110, + 65 + ], + "spans": [ + { + "bbox": [ + 52, + 53, + 110, + 65 + ], + "type": "text", + "content": "References" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 52, + 68, + 298, + 704 + ], + "type": "list", + "angle": 0, + "index": 12, + "blocks": [ + { + "bbox": [ + 52, + 68, + 298, + 112 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 68, + 298, + 112 + ], + "spans": [ + { + "bbox": [ + 52, + 68, + 298, + 112 + ], + "type": "text", + "content": "Fan Bai, Yuxin Du, Tiejun Huang, Max Q-H Meng, and Bo Zhao. M3d: Advancing 3d medical image analysis with multi-modal large language models. arXiv preprint arXiv:2404.00578, 2024." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 52, + 116, + 298, + 182 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 116, + 298, + 182 + ], + "spans": [ + { + "bbox": [ + 52, + 116, + 298, + 182 + ], + "type": "text", + "content": "Boyuan Chen, Zhuo Xu, Sean Kirmani, Brain Ichter, Dorsa Sadigh, Leonidas Guibas, and Fei Xia. Spatialvlm: Endowing vision-language models with spatial reasoning capabilities. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 14455-14465, 2024." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 52, + 185, + 298, + 239 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 185, + 298, + 239 + ], + "spans": [ + { + "bbox": [ + 52, + 185, + 298, + 239 + ], + "type": "text", + "content": "Hao Chen, Wei Zhao, Yingli Li, Tianyang Zhong, Yisong Wang, Youlan Shang, Lei Guo, Junwei Han, Tianming Liu, Jun Liu, et al. 3d-ct-gpt: Generating 3d radiology reports through integration of large vision-language models. 
arXiv preprint arXiv:2409.19330, 2024." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 52, + 243, + 298, + 309 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 243, + 298, + 309 + ], + "spans": [ + { + "bbox": [ + 52, + 243, + 298, + 309 + ], + "type": "text", + "content": "Sijin Chen, Xin Chen, Chi Zhang, Mingsheng Li, Gang Yu, Hao Fei, Hongyuan Zhu, Jiayuan Fan, and Tao Chen. Ll3da: Visual interactive instruction tuning for omni-3d understanding reasoning and planning. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 26428-26438, 2024." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 52, + 312, + 298, + 355 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 312, + 298, + 355 + ], + "spans": [ + { + "bbox": [ + 52, + 312, + 298, + 355 + ], + "type": "text", + "content": "Yilun Chen, Shuai Yang, Haifeng Huang, Tai Wang, Ruiyuan Lyu, Runsen Xu, Dahua Lin, and Jiangmiao Pang. Grounded 3d-llm with referent tokens. arXiv preprint arXiv:2405.10370, 2024." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 52, + 359, + 298, + 403 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 359, + 298, + 403 + ], + "spans": [ + { + "bbox": [ + 52, + 359, + 298, + 403 + ], + "type": "text", + "content": "Jiajun Deng, Tianyu He, Li Jiang, Tianyu Wang, Feras Dayoub, and Ian Reid. 3d-llava: Towards generalist 3d lmm's with omni superpoint transformer. arXiv preprint arXiv:2501.01163, 2025." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 52, + 406, + 298, + 450 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 406, + 298, + 450 + ], + "spans": [ + { + "bbox": [ + 52, + 406, + 298, + 450 + ], + "type": "text", + "content": "Junjie Fei, Mahmoud Ahmed, Jian Ding, Eslam Mohamed Bakr, and Mohamed Elhoseiny. Kestrel: Point grounding multimodal llm for part-aware 3d vision-language understanding. 
arXiv preprint arXiv:2405.18937, 2024." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 52, + 453, + 298, + 496 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 453, + 298, + 496 + ], + "spans": [ + { + "bbox": [ + 52, + 453, + 298, + 496 + ], + "type": "text", + "content": "Rao Fu, Jingyu Liu, Xilun Chen, Yixin Nie, and Wenhan Xiong. Scene-llm: Extending language model for 3d visual understanding and reasoning. arXiv preprint arXiv:2403.11401, 2024." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 52, + 500, + 298, + 555 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 500, + 298, + 555 + ], + "spans": [ + { + "bbox": [ + 52, + 500, + 298, + 555 + ], + "type": "text", + "content": "Chen Gao, Baining Zhao, Weichen Zhang, Jinzhu Mao, Jun Zhang, Zhiheng Zheng, Fanhang Man, Jianjie Fang, Zile Zhou, Jinqiang Cui, et al. Embodiedcity: A benchmark platform for embodied agent in real-world city environment. arXiv preprint arXiv:2410.09604, 2024." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 52, + 558, + 298, + 634 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 558, + 298, + 634 + ], + "spans": [ + { + "bbox": [ + 52, + 558, + 298, + 634 + ], + "type": "text", + "content": "Qiao Gu, Ali Kuwajerwala, Sacha Morin, Krishna Murthy Jatavallabhula, Bipasha Sen, Aditya Agarwal, Corban Rivera, William Paul, Kirsty Ellis, Rama Chellappa, et al. Conceptgraphs: Open-vocabulary 3d scene graphs for perception and planning. In 2024 IEEE International Conference on Robotics and Automation (ICRA), pages 5021-5028. IEEE, 2024." 
+ } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 52, + 638, + 298, + 704 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 638, + 298, + 704 + ], + "spans": [ + { + "bbox": [ + 52, + 638, + 298, + 704 + ], + "type": "text", + "content": "Ziyu Guo, Renrui Zhang, Xiangyang Zhu, Yiwen Tang, Xi-anzheng Ma, Jiaming Han, Kexin Chen, Peng Gao, Xi-anzhi Li, Hongsheng Li, et al. Point-bind & point-llm: Aligning point cloud with multi-modality for 3d understanding, generation, and instruction following. arXiv preprint arXiv:2309.00615, 2023." + } + ] + } + ], + "index": 11 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 313, + 55, + 558, + 704 + ], + "type": "list", + "angle": 0, + "index": 25, + "blocks": [ + { + "bbox": [ + 313, + 55, + 558, + 99 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 55, + 558, + 99 + ], + "spans": [ + { + "bbox": [ + 313, + 55, + 558, + 99 + ], + "type": "text", + "content": "Shuting He, Henghui Ding, Xudong Jiang, and Bihan Wen. Segpoint: Segment any point cloud via large language model. In European Conference on Computer Vision, pages 349-367. Springer, 2024." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 313, + 102, + 558, + 156 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 102, + 558, + 156 + ], + "spans": [ + { + "bbox": [ + 313, + 102, + 558, + 156 + ], + "type": "text", + "content": "Yining Hong, Haoyu Zhen, Peihao Chen, Shuhong Zheng, Yilun Du, Zhenfang Chen, and Chuang Gan. 3d-llm: Injecting the 3d world into large language models. Advances in Neural Information Processing Systems, 36:20482-20494, 2023." 
+ } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 313, + 162, + 558, + 217 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 162, + 558, + 217 + ], + "spans": [ + { + "bbox": [ + 313, + 162, + 558, + 217 + ], + "type": "text", + "content": "Yining Hong, Zishuo Zheng, Peihao Chen, Yian Wang, Junyan Li, and Chuang Gan. Multiply: A multisensory object-centric embodied large language model in 3d world. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 26406-26416, 2024." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 313, + 220, + 558, + 286 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 220, + 558, + 286 + ], + "spans": [ + { + "bbox": [ + 313, + 220, + 558, + 286 + ], + "type": "text", + "content": "Haifeng Huang, Yilun Chen, Zehan Wang, Rongjie Huang, Runsen Xu, Tai Wang, Luping Liu, Xize Cheng, Yang Zhao, Jiangmiao Pang, et al. Chat-scene: Bridging 3d scene and large language models with object identifiers. In The Thirty-eighth Annual Conference on Neural Information Processing Systems, 2024." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 313, + 290, + 558, + 335 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 290, + 558, + 335 + ], + "spans": [ + { + "bbox": [ + 313, + 290, + 558, + 335 + ], + "type": "text", + "content": "Jiayi Ji, Haowei Wang, Changli Wu, Yiwei Ma, Xiaoshuai Sun, and Rongrong Ji. Jm3d & jm3d-llm: Elevating 3d representation with joint multi-modal cues. IEEE Transactions on Pattern Analysis and Machine Intelligence, 2024." + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 313, + 339, + 558, + 394 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 339, + 558, + 394 + ], + "spans": [ + { + "bbox": [ + 313, + 339, + 558, + 394 + ], + "type": "text", + "content": "Baoxiong Jia, Yixin Chen, Huangyue Yu, Yan Wang, Xuesong Niu, Tengyu Liu, Qing Li, and Siyuan Huang. 
Sceneverse: Scaling 3d vision-language learning for grounded scene understanding. In European Conference on Computer Vision, pages 289-310. Springer, 2024." + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 313, + 398, + 558, + 453 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 398, + 558, + 453 + ], + "spans": [ + { + "bbox": [ + 313, + 398, + 558, + 453 + ], + "type": "text", + "content": "Jacob Devlin Ming-Wei Chang Kenton and Lee Kristina Toutanova. Bert: Pre-training of deep bidirectional transformers for language understanding. In North American Chapter of the Association for Computational Linguistics: Human Language Technologies, 2019." + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 313, + 456, + 558, + 511 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 456, + 558, + 511 + ], + "spans": [ + { + "bbox": [ + 313, + 456, + 558, + 511 + ], + "type": "text", + "content": "Junnan Li, Dongxu Li, Caiming Xiong, and Steven Hoi. Blip: Bootstrapping language-image pre-training for unified vision-language understanding and generation. In International conference on machine learning, pages 12888-12900. PMLR, 2022." + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 313, + 515, + 558, + 560 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 515, + 558, + 560 + ], + "spans": [ + { + "bbox": [ + 313, + 515, + 558, + 560 + ], + "type": "text", + "content": "Dingning Liu, Xiaoshui Huang, Zhihui Wang, Zhenfei Yin, Peng Gao, Yujiao Wu, Yuenan Hou, Xinzhu Ma, and Wanli Ouyang. Pointmllm: Aligning multi-modality with llm for point cloud understanding, generation and editing." 
+ } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 313, + 564, + 558, + 618 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 564, + 558, + 618 + ], + "spans": [ + { + "bbox": [ + 313, + 564, + 558, + 618 + ], + "type": "text", + "content": "Che Liu, Zhongwei Wan, Yuqi Wang, Hui Shen, Haozhe Wang, Kangyu Zheng, Mi Zhang, and Rossella Arcucci. Benchmarking and boosting radiology report generation for 3d high-resolution medical images. arXiv preprint arXiv:2406.07146, 2024." + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 313, + 623, + 558, + 677 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 623, + 558, + 677 + ], + "spans": [ + { + "bbox": [ + 313, + 623, + 558, + 677 + ], + "type": "text", + "content": "Dingning Liu, Xiaoshui Huang, Yuenan Hou, Zhihui Wang, Zhenfei Yin, Yongshun Gong, Peng Gao, and Wanli Ouyang. Uni3d-llm: Unifying point cloud perception, generation and editing with large language models. arXiv preprint arXiv:2402.03327, 2024." + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 313, + 681, + 558, + 704 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 681, + 558, + 704 + ], + "spans": [ + { + "bbox": [ + 313, + 681, + 558, + 704 + ], + "type": "text", + "content": "Chenyang Ma, Kai Lu, Ta-Ying Cheng, Niki Trigoni, and Andrew Markham. 
Spatialpin: Enhancing spatial reasoning" + } + ] + } + ], + "index": 24 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [], + "page_size": [ + 612, + 792 + ], + "page_idx": 7 + }, + { + "para_blocks": [ + { + "bbox": [ + 52, + 54, + 299, + 704 + ], + "type": "list", + "angle": 0, + "index": 12, + "blocks": [ + { + "bbox": [ + 62, + 54, + 299, + 87 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 62, + 54, + 299, + 87 + ], + "spans": [ + { + "bbox": [ + 62, + 54, + 299, + 87 + ], + "type": "text", + "content": "capabilities of vision-language models through prompting and interacting 3d priors. arXiv preprint arXiv:2403.13438, 2024." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 52, + 92, + 299, + 159 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 92, + 299, + 159 + ], + "spans": [ + { + "bbox": [ + 52, + 92, + 299, + 159 + ], + "type": "text", + "content": "Xianzheng Ma, Yash Bhalgat, Brandon Smart, Shuai Chen, Xinghui Li, Jian Ding, Jindong Gu, Dave Zhenyu Chen, Songyou Peng, Jia-Wang Bian, et al. When llms step into the 3d world: A survey and meta-analysis of 3d tasks via multi-modal large language models. arXiv preprint arXiv:2405.10255, 2024." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 52, + 163, + 299, + 208 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 163, + 299, + 208 + ], + "spans": [ + { + "bbox": [ + 52, + 163, + 299, + 208 + ], + "type": "text", + "content": "Yuexin Ma, Tai Wang, Xuyang Bai, Huitong Yang, Yuenan Hou, Yaming Wang, Yu Qiao, Ruigang Yang, and Xinge Zhu. Vision-centric bev perception: A survey. IEEE Transactions on Pattern Analysis and Machine Intelligence, 2024." 
+ } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 52, + 213, + 299, + 257 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 213, + 299, + 257 + ], + "spans": [ + { + "bbox": [ + 52, + 213, + 299, + 257 + ], + "type": "text", + "content": "Yunze Man, Liang-Yan Gui, and Yu-Xiong Wang. Situational awareness matters in 3d vision language reasoning. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 13678-13688, 2024." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 52, + 262, + 299, + 317 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 262, + 299, + 317 + ], + "spans": [ + { + "bbox": [ + 52, + 262, + 299, + 317 + ], + "type": "text", + "content": "Zekun Qi, Runpei Dong, Shaochen Zhang, Haoran Geng, Chunrui Han, Zheng Ge, Li Yi, and Kaiheng Ma. Shapellm: Universal 3d object understanding for embodied interaction. In European Conference on Computer Vision, pages 214-238. Springer, 2024." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 52, + 322, + 299, + 388 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 322, + 299, + 388 + ], + "spans": [ + { + "bbox": [ + 52, + 322, + 299, + 388 + ], + "type": "text", + "content": "Zhangyang Qi, Ye Fang, Zeyi Sun, Xiaoyang Wu, Tong Wu, Jiaqi Wang, Dahua Lin, and Hengshuang Zhao. Gpt4point: A unified framework for point-language understanding and generation. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 26417-26427, 2024." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 52, + 392, + 299, + 459 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 392, + 299, + 459 + ], + "spans": [ + { + "bbox": [ + 52, + 392, + 299, + 459 + ], + "type": "text", + "content": "Alec Radford, Jong Wook Kim, Chris Hallacy, Aditya Ramesh, Gabriel Goh, Sandhini Agarwal, Girish Sastry, Amanda Askell, Pamela Mishkin, Jack Clark, et al. 
Learning transferable visual models from natural language supervision. In International conference on machine learning, pages 8748-8763. PMLR, 2021." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 52, + 464, + 299, + 486 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 464, + 299, + 486 + ], + "spans": [ + { + "bbox": [ + 52, + 464, + 299, + 486 + ], + "type": "text", + "content": "Alec Radford. Improving language understanding by generative pre-training. 2018." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 52, + 491, + 299, + 546 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 491, + 299, + 546 + ], + "spans": [ + { + "bbox": [ + 52, + 491, + 299, + 546 + ], + "type": "text", + "content": "Yuan Tang, Xu Han, Xianzhi Li, Qiao Yu, Yixue Hao, Long Hu, and Min Chen. Minigpt-3d: Efficiently aligning 3d point clouds with large language models using 2d priors. In Proceedings of the 32nd ACM International Conference on Multimedia, pages 6617-6626, 2024." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 52, + 551, + 299, + 596 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 551, + 299, + 596 + ], + "spans": [ + { + "bbox": [ + 52, + 551, + 299, + 596 + ], + "type": "text", + "content": "Yuan Tang, Xu Han, Xianzhi Li, Qiao Yu, Jinfeng Xu, Yixue Hao, Long Hu, and Min Chen. More text, less point: Towards 3d data-efficient point-language understanding. arXiv preprint arXiv:2408.15966, 2024." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 52, + 600, + 299, + 643 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 600, + 299, + 643 + ], + "spans": [ + { + "bbox": [ + 52, + 600, + 299, + 643 + ], + "type": "text", + "content": "Zehan Wang, Haifeng Huang, Yang Zhao, Ziang Zhang, and Zhou Zhao. Chat-3d: Data-efficiently tuning large language model for universal dialogue of 3d scenes. arXiv preprint arXiv:2308.08769, 2023." 
+ } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 52, + 648, + 299, + 704 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 648, + 299, + 704 + ], + "spans": [ + { + "bbox": [ + 52, + 648, + 299, + 704 + ], + "type": "text", + "content": "Xiaosong Wang, Xiaofan Zhang, Guotai Wang, Junjun He, Zhongyu Li, Wentao Zhu, Yi Guo, Qi Dou, Xiaoxiao Li, Dequan Wang, et al. Openmedlab: An open-source platform for multi-modality foundation models in medicine. arXiv preprint arXiv:2402.18028, 2024." + } + ] + } + ], + "index": 11 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 313, + 54, + 559, + 700 + ], + "type": "list", + "angle": 0, + "index": 25, + "blocks": [ + { + "bbox": [ + 313, + 54, + 559, + 109 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 54, + 559, + 109 + ], + "spans": [ + { + "bbox": [ + 313, + 54, + 559, + 109 + ], + "type": "text", + "content": "Runsen Xu, Xiaolong Wang, Tai Wang, Yilun Chen, Jiangmiao Pang, and Dahua Lin. Pointllm: Empowering large language models to understand point clouds. In European Conference on Computer Vision, pages 131-147. Springer, 2025." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 313, + 114, + 559, + 168 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 114, + 559, + 168 + ], + "spans": [ + { + "bbox": [ + 313, + 114, + 559, + 168 + ], + "type": "text", + "content": "Senqiao Yang, Jiaming Liu, Ray Zhang, Mingjie Pan, Zoey Guo, Xiaqi Li, Zehui Chen, Peng Gao, Yandong Guo, and Shanghang Zhang. Lidar-llm: Exploring the potential of large language models for 3d lidar understanding. arXiv preprint arXiv:2312.14074, 2023." 
+ } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 313, + 172, + 559, + 227 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 172, + 559, + 227 + ], + "spans": [ + { + "bbox": [ + 313, + 172, + 559, + 227 + ], + "type": "text", + "content": "Fan Yang, Sicheng Zhao, Yanhao Zhang, Haoxiang Chen, Hui Chen, Wenbo Tang, Haonan Lu, Pengfei Xu, Zhenyu Yang, Jungong Han, et al. Llmi3d: Empowering llm with 3d perception from a single 2d image. arXiv preprint arXiv:2408.07422, 2024." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 313, + 231, + 559, + 286 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 231, + 559, + 286 + ], + "spans": [ + { + "bbox": [ + 313, + 231, + 559, + 286 + ], + "type": "text", + "content": "Zhihao Yuan, Jinke Ren, Chun-Mei Feng, Hengshuang Zhao, Shuguang Cui, and Zhen Li. Visual programming for zero-shot open-vocabulary 3d visual grounding. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 20623-20633, 2024." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 313, + 289, + 559, + 334 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 289, + 559, + 334 + ], + "spans": [ + { + "bbox": [ + 313, + 289, + 559, + 334 + ], + "type": "text", + "content": "Jirong Zha, Liang Han, Xiwang Dong, and Zhang Ren. Privacy-preserving push-sum distributed cubature information filter for nonlinear target tracking with switching directed topologies. ISA transactions, 136:16-30, 2023." + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 313, + 338, + 559, + 380 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 338, + 559, + 380 + ], + "spans": [ + { + "bbox": [ + 313, + 338, + 559, + 380 + ], + "type": "text", + "content": "Jirong Zha, Nan Zhou, Zhenyu Liu, Tao Sun, and Xinlei Chen. Diffusion-based filter for fast and accurate collaborative tracking with low data transmission. 
Authorea Preprints, 2024." + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 313, + 385, + 559, + 441 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 385, + 559, + 441 + ], + "spans": [ + { + "bbox": [ + 313, + 385, + 559, + 441 + ], + "type": "text", + "content": "Renrui Zhang, Ziyu Guo, Wei Zhang, Kunchang Li, Xupeng Miao, Bin Cui, Yu Qiao, Peng Gao, and Hongsheng Li. Pointclip: Point cloud understanding by clip. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 8552-8562, 2022." + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 313, + 445, + 559, + 498 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 445, + 559, + 498 + ], + "spans": [ + { + "bbox": [ + 313, + 445, + 559, + 498 + ], + "type": "text", + "content": "Sha Zhang, Di Huang, Jiajun Deng, Shixiang Tang, Wanli Ouyang, Tong He, and Yanyong Zhang. Agent3d-zero: An agent for zero-shot 3d understanding. In European Conference on Computer Vision, pages 186-202. Springer, 2024." + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 313, + 503, + 559, + 546 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 503, + 559, + 546 + ], + "spans": [ + { + "bbox": [ + 313, + 503, + 559, + 546 + ], + "type": "text", + "content": "Junsheng Zhou, Jinsheng Wang, Baorui Ma, Yu-Shen Liu, Tiejun Huang, and Xinlong Wang. Uni3d: Exploring unified 3d representation at scale. arXiv preprint arXiv:2310.06773, 2023." + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 313, + 551, + 559, + 606 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 551, + 559, + 606 + ], + "spans": [ + { + "bbox": [ + 313, + 551, + 559, + 606 + ], + "type": "text", + "content": "Xiangyang Zhu, Renrui Zhang, Bowei He, Ziyu Guo, Ziyao Zeng, Zipeng Qin, Shanghang Zhang, and Peng Gao. Pointclip v2: Prompting clip and gpt for powerful 3d open-world learning. 
In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 2639-2650, 2023." + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 313, + 609, + 559, + 654 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 609, + 559, + 654 + ], + "spans": [ + { + "bbox": [ + 313, + 609, + 559, + 654 + ], + "type": "text", + "content": "Chenming Zhu, Tai Wang, Wenwei Zhang, Kai Chen, and Xihui Liu. Scanreason: Empowering 3d visual grounding with reasoning capabilities. In European Conference on Computer Vision, pages 151-168. Springer, 2024." + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 313, + 658, + 559, + 700 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 658, + 559, + 700 + ], + "spans": [ + { + "bbox": [ + 313, + 658, + 559, + 700 + ], + "type": "text", + "content": "Chenming Zhu, Tai Wang, Wenwei Zhang, Jiangmiao Pang, and Xihui Liu. Llava-3d: A simple yet effective pathway to empowering lmm with 3d-awareness. arXiv preprint arXiv:2409.18125, 2024." 
+ } + ] + } + ], + "index": 24 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [], + "page_size": [ + 612, + 792 + ], + "page_idx": 8 + } + ], + "_backend": "vlm", + "_version_name": "2.6.4" +} \ No newline at end of file diff --git a/data/2025/2504_05xxx/2504.05792/2e1d882c-c0bf-4c6f-98d8-3ae7d8fb9f26_content_list.json b/data/2025/2504_05xxx/2504.05792/2e1d882c-c0bf-4c6f-98d8-3ae7d8fb9f26_content_list.json new file mode 100644 index 0000000000000000000000000000000000000000..d571433bec4d9d50a29065043c72729beef55b90 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05792/2e1d882c-c0bf-4c6f-98d8-3ae7d8fb9f26_content_list.json @@ -0,0 +1,1238 @@ +[ + { + "type": "text", + "text": "Pinching-Antenna Assisted ISAC: A CRLB Perspective", + "text_level": 1, + "bbox": [ + 124, + 41, + 872, + 69 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Zhiguo Ding, Fellow, IEEE", + "bbox": [ + 390, + 79, + 599, + 95 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Abstract—Recently, pinching antennas have attracted significant research interest due to their capability to reconfigure wireless channels as well as their array configuration flexibility. This letter focuses on how these features can be used to support integrated sensing and communications (ISAC) from the Cramér-Rao lower bound (CRLB) perspective. In particular, the CRLB achieved by pinching antennas is first derived and then compared to that of conventional antennas. The presented analytical and simulation results demonstrate that using pinching antennas can significantly reduce CRLB and, hence, enhance positioning accuracy. 
In addition, this letter also reveals that the low-cost and reconfigurability features of pinching antennas can be utilized to realize flexible user-centric positioning.", + "bbox": [ + 73, + 114, + 491, + 281 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Index Terms—Pinching antennas, integrated sensing and communications (ISAC), Cramér-Rao lower bound (CRLB), estimation theory.", + "bbox": [ + 73, + 282, + 491, + 321 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "I. INTRODUCTION", + "text_level": 1, + "bbox": [ + 215, + 325, + 349, + 339 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Recently, pinching antennas have received significant attention from both academia and industry as a novel evolution of smart antennas, and offer three distinguished features [1], [2]. One is their capability to create strong line-of-sight (LoS) links between the transceivers, which means that large-scale path losses and LoS blockage can be effectively mitigated by activating antennas close to users [3]. The second feature is the reconfigurability of pinching-antenna systems, where the topology of a pinching-antenna array, e.g., the locations and the number of pinching antennas, can be flexibly adjusted. The third feature is their practicality, where DOCOMO's prototype shows that pinching antennas can be straightforwardly implemented in a low-cost manner [1].", + "bbox": [ + 73, + 345, + 491, + 541 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "In the literature, there already exists a large amount of work to demonstrate that the use of pinching antennas can significantly enhance the communication functionality of wireless networks. For example, the fundamental issues of pinching antennas, such as antenna activation, the architecture of a pinching-antenna array, and the array gains, have been investigated in [4]–[6]. 
Antenna placement is key to realizing the full potential of pinching-antenna systems, where various designs and their impact on the system throughput have been investigated in [7], [8]. Channel estimation and beam training are crucial issues to pinching-antenna systems, and sophisticated designs using the flexibility features of pinching antennas have been developed in [9], [10]. For many resource allocation problems encountered in pinching-antenna systems, the use of conventional convex optimization leads to high computational complexity, which motivates the application of advanced learning methods [11], [12]. The applications of pinching antennas to improve the uplink throughput and the security of communication networks have also been recently investigated in [13], [14].", + "bbox": [ + 73, + 542, + 491, + 844 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "However, we note that the impact of pinching antennas on the sensing functionality of wireless networks has not yet been fully characterized in the literature, although the recent work in [15] demonstrated the importance of pinching", + "bbox": [ + 73, + 845, + 491, + 906 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Z. Ding is with the University of Manchester, Manchester, M1 9BB, UK, and Khalifa University, Abu Dhabi, UAE.", + "bbox": [ + 73, + 907, + 491, + 931 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "antennas in integrated sensing and communication (ISAC) systems [16], which motivates this letter. In particular, in this letter, the Cramér-Rao lower bound (CRLB) is used as the performance metric to characterize the capability of pinching antennas for enhancing the positioning accuracy of ISAC networks. The CRLB achieved by pinching antennas is first derived in the letter, and then compared to conventional antennas. 
The presented analytical results reveal that the use of pinching antennas can ensure that users at different locations experience uniform positioning accuracy, whereas the use of conventional antennas can result in a significant disparity in accuracy among the users. In addition, the important properties of CRLB achieved by pinching antennas, such as the effects of antenna placement and the local maximums of CRLB, are also investigated. Furthermore, this letter also reveals that the low-cost and reconfigurability features of pinching antennas can be utilized to realize flexible user-centric positioning.", + "bbox": [ + 501, + 114, + 924, + 372 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "II. SYSTEM MODEL", + "text_level": 1, + "bbox": [ + 640, + 388, + 785, + 402 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Consider a pinching-antenna system that is deployed to provide ISAC services to $M$ single-antenna users, denoted by $\\mathrm{U}_m$ . Given the fact that there is already a rich literature on using pinching antennas to enhance communications, and also due to space limitations, the impact of pinching antennas on the sensing functionality is focused on in this letter. Without loss of generality, assume that $N$ pinching antennas are activated on $N_{\\mathrm{WG}}$ waveguides. The location of the $n$ -th pinching antenna is denoted by $\\psi_n^{\\mathrm{Pin}} = (x_n^{\\mathrm{Pin}}, y_n^{\\mathrm{Pin}}, d_{\\mathrm{H}})$ , where $d_{\\mathrm{H}}$ denotes the height of the waveguides.", + "bbox": [ + 501, + 412, + 921, + 564 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "The service area is denoted by $\\mathcal{A}$ and is assumed to be a rectangle with its two sides denoted by $D_{\\mathrm{W}}$ and $D_{\\mathrm{L}}$ , respectively, and its center located at $(0,0,0)$ . 
The users are assumed to be uniformly distributed in $\\mathcal{A}$ , and $\\mathrm{U}_m$ 's location is denoted by $\\psi_m = (x_m,y_m,0)$ .", + "bbox": [ + 503, + 565, + 921, + 641 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Denote the distance from the $n$ -th pinching antenna to the $m$ -th user by $d_{mn}$ . Distance (range) estimates for the $m$ -th user can be modeled as follows: [17]", + "bbox": [ + 503, + 641, + 921, + 686 + ], + "page_idx": 0 + }, + { + "type": "equation", + "text": "\n$$\n\\hat {d} _ {m n} = d _ {m n} + w _ {m n}, \\tag {1}\n$$\n", + "text_format": "latex", + "bbox": [ + 640, + 696, + 919, + 714 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "where $d_{mn} = \\sqrt{(x_m - x_n^{\\mathrm{Pin}})^2 + (y_m - y_n^{\\mathrm{Pin}})^2 + d_{\\mathrm{H}}^2}$ , and $w_{mn}$ is a zero-mean Gaussian distributed noise term whose variance is distance-dependent, i.e.,", + "bbox": [ + 503, + 727, + 921, + 779 + ], + "page_idx": 0 + }, + { + "type": "equation", + "text": "\n$$\n\\sigma_ {m n} ^ {2} = K _ {E} \\left(\\left(x _ {m} - x _ {n} ^ {\\mathrm {P i n}}\\right) ^ {2} + \\left(y _ {m} - y _ {n} ^ {\\mathrm {P i n}}\\right) ^ {2} + d _ {\\mathrm {H}} ^ {2}\\right), \\tag {2}\n$$\n", + "text_format": "latex", + "bbox": [ + 524, + 787, + 919, + 811 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "$K_{E}$ denotes a system parameter decided by the range estimation environment.", + "bbox": [ + 503, + 820, + 919, + 849 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "III. IMPACT OF PINCHING ANTENNAS ON POSITIONING", + "text_level": 1, + "bbox": [ + 514, + 871, + 906, + 885 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "A. CRLB Achieved by Pinching-Antenna Systems", + "text_level": 1, + "bbox": [ + 503, + 891, + 841, + 907 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Without loss of generality, the impact of pinching antennas on $\\mathrm{U}_m$ 's localization is focused on. 
The joint probability den", + "bbox": [ + 503, + 914, + 921, + 945 + ], + "page_idx": 0 + }, + { + "type": "page_number", + "text": "1", + "bbox": [ + 911, + 30, + 919, + 40 + ], + "page_idx": 0 + }, + { + "type": "aside_text", + "text": "arXiv:2504.05792v1 [cs.IT] 8 Apr 2025", + "bbox": [ + 22, + 255, + 58, + 676 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "sity function (pdf) of $\\hat{d}_{mn}$ conditioned on $d_{mn}$ , $1\\leq n\\leq N$ , is given by", + "bbox": [ + 73, + 66, + 491, + 99 + ], + "page_idx": 1 + }, + { + "type": "equation", + "text": "\n$$\nf (\\hat {d} _ {m 1}, \\dots , \\hat {d} _ {m N}) = \\prod_ {n = 1} ^ {N} \\frac {1}{\\sqrt {2 \\pi \\sigma_ {m n} ^ {2}}} e ^ {- \\frac {(\\hat {d} _ {m n} - d _ {m n}) ^ {2}}{2 \\sigma_ {m n} ^ {2}}}, \\tag {3}\n$$\n", + "text_format": "latex", + "bbox": [ + 102, + 104, + 491, + 147 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "whose log-likelihood function is given by", + "bbox": [ + 75, + 151, + 359, + 167 + ], + "page_idx": 1 + }, + { + "type": "equation", + "text": "\n$$\n\\begin{array}{l} L \\triangleq \\ln f (\\hat {d} _ {m 1}, \\dots , \\hat {d} _ {m N}) = - \\frac {N}{2} \\ln (2 \\pi) \\tag {4} \\\\ - \\sum_ {n = 1} ^ {N} \\ln \\sigma_ {m n} - \\sum_ {n = 1} ^ {N} \\frac {(\\hat {d} _ {m n} - d _ {m n}) ^ {2}}{2 \\sigma_ {m n} ^ {2}}. 
\\\\ \\end{array}\n$$\n", + "text_format": "latex", + "bbox": [ + 142, + 172, + 488, + 244 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Recall that the CRLB for $x_{m}$ and $y_{m}$ is given by", + "bbox": [ + 75, + 250, + 413, + 265 + ], + "page_idx": 1 + }, + { + "type": "equation", + "text": "\n$$\n\\mathcal {E} \\left\\{\\left(\\hat {x} _ {m} - x _ {m}\\right) ^ {2} + \\left(\\hat {y} _ {m} - y _ {m}\\right) ^ {2} \\right\\} \\geq \\frac {1}{J _ {x} ^ {m}} + \\frac {1}{J _ {y} ^ {m}} \\triangleq \\mathrm {C R B} _ {m}, \\tag {5}\n$$\n", + "text_format": "latex", + "bbox": [ + 91, + 268, + 488, + 299 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "where $\\hat{x}_m$ and $\\hat{y}_m$ denote the estimates of $x_m$ and $y_m$ , respectively, $J_x^m = \\mathcal{E}\\left\\{-\\frac{\\partial^2L}{\\partial x_m^2}\\right\\}$ and $J_y^m = \\mathcal{E}\\left\\{-\\frac{\\partial^2L}{\\partial y_m^2}\\right\\}$ .", + "bbox": [ + 75, + 304, + 488, + 340 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "$\\frac{\\partial L}{\\partial x_m}$ can be obtained as follows:", + "bbox": [ + 91, + 339, + 315, + 358 + ], + "page_idx": 1 + }, + { + "type": "equation", + "text": "\n$$\n\\begin{array}{l} \\frac {\\partial L}{\\partial x _ {m}} = - \\sum_ {n = 1} ^ {N} \\frac {1}{\\sigma_ {m n}} \\frac {\\partial \\sigma_ {m n}}{\\partial x _ {m}} - \\sum_ {n = 1} ^ {N} \\frac {\\left(d _ {m n} - \\hat {d} _ {m n}\\right)}{\\sigma_ {m n} ^ {2}} \\frac {\\partial d _ {m n}}{\\partial x _ {m}} \\tag {6} \\\\ + \\sum_ {n = 1} ^ {N} \\frac {(\\hat {d} _ {m n} - d _ {m n}) ^ {2}}{\\sigma_ {m n} ^ {3}} \\frac {\\partial \\sigma_ {m n}}{\\partial x _ {m}}. 
\\\\ \\end{array}\n$$\n", + "text_format": "latex", + "bbox": [ + 91, + 364, + 488, + 448 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "The expression of $\\frac{\\partial^2L}{\\partial x_m^2}$ is quite invoked; however, by using the fact that $\\mathcal{E}\\{\\hat{d}_{mn} - d_{mn}\\} = 0$ and following the steps similar to those in [17], the expectation of $\\frac{\\partial^2L}{\\partial x_m^2}$ , i.e., $J_x^m$ , can be obtained as follows:", + "bbox": [ + 73, + 455, + 491, + 521 + ], + "page_idx": 1 + }, + { + "type": "equation", + "text": "\n$$\nJ _ {x} ^ {m} = \\sum_ {n = 1} ^ {N} \\frac {\\left(2 K _ {E} + 1\\right)}{\\sigma_ {m n} ^ {2}} \\frac {\\left(x _ {m} - x _ {n} ^ {\\operatorname {P i n}}\\right) ^ {2}}{\\left(x _ {m} - x _ {n} ^ {\\operatorname {P i n}}\\right) ^ {2} + \\left(y _ {m} - y _ {n} ^ {\\operatorname {P i n}}\\right) ^ {2} + d _ {\\mathrm {H}} ^ {2}}. \\tag {7}\n$$\n", + "text_format": "latex", + "bbox": [ + 81, + 526, + 488, + 580 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "$J_{y}^{m}$ can be obtained in a similar form, which means that the CRLB for estimating $\\mathrm{U}_m$ 's location can be expressed as follows:", + "bbox": [ + 73, + 588, + 491, + 633 + ], + "page_idx": 1 + }, + { + "type": "equation", + "text": "\n$$\n\\begin{array}{l} \\mathrm {C R B} _ {m} = \\frac {K _ {E}}{(2 K _ {E} + 1)} \\left(\\frac {1}{\\sum_ {n = 1} ^ {N} \\frac {\\left(x _ {m} - x _ {n} ^ {\\mathrm {P i n}}\\right) ^ {2}}{\\left(\\left(x _ {m} - x _ {n} ^ {\\mathrm {P i n}}\\right) ^ {2} + \\left(y _ {m} - y _ {n} ^ {\\mathrm {P i n}}\\right) ^ {2} + d _ {\\mathrm {H}} ^ {2}\\right) ^ {2}}} \\right. \\\\ \\left. + \\frac {1}{\\sum_ {n = 1} ^ {N} \\frac {\\left(y _ {m} - y _ {n} ^ {\\operatorname* {P i n}}\\right) ^ {2}}{\\left(\\left(x _ {m} - x _ {n} ^ {\\operatorname* {P i n}}\\right) ^ {2} + \\left(y _ {m} - y _ {n} ^ {\\operatorname* {P i n}}\\right) ^ {2} + d _ {\\mathrm {H}} ^ {2}\\right) ^ {2}}}\\right). 
\\tag {8} \\\\ \\end{array}\n$$\n", + "text_format": "latex", + "bbox": [ + 76, + 637, + 488, + 752 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "B. Performance Analysis Based on CRLB", + "text_level": 1, + "bbox": [ + 75, + 770, + 359, + 785 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "1) Performance Gain over Conventional Antennas: For the conventional-antenna benchmark, consider the use of a circular antenna array with its center located at $(0,0,0)$ and its radius being $\\frac{\\lambda}{4\\sin\\left(\\frac{\\pi}{N}\\right)}$ , which ensures that the minimal pairwise distance of the antennas is $\\frac{\\lambda}{2}$ , where $\\lambda$ denotes the wavelength. By using the fact that the users are uniformly distributed within the service area, the performance gain of pinching antennas over conventional antennas can be evaluated as follows:", + "bbox": [ + 73, + 787, + 491, + 900 + ], + "page_idx": 1 + }, + { + "type": "equation", + "text": "\n$$\n\\Delta_ {\\mathrm {C R B}} = \\int_ {- \\frac {D _ {\\mathrm {L}}}{2}} ^ {\\frac {D _ {\\mathrm {L}}}{2}} \\int_ {- \\frac {D _ {\\mathrm {W}}}{2}} ^ {\\frac {D _ {\\mathrm {W}}}{2}} \\left(\\mathrm {C R B} _ {m} - \\mathrm {C R B} _ {m} ^ {\\text {C o n v}}\\right) \\frac {d y _ {m}}{D _ {\\mathrm {W}}} \\frac {d x _ {m}}{D _ {\\mathrm {L}}}, \\tag {9}\n$$\n", + "text_format": "latex", + "bbox": [ + 91, + 902, + 488, + 943 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "where $\\mathrm{CRB}_m^{\\mathrm{Conv}}$ can be obtained similarly to $\\mathrm{CRB}_m$ by replacing the locations of the pinching antennas with those of the conventional antennas. The performance gain in (9) can be straightforwardly evaluated via computer simulations, but a closed-form expression of $\\Delta_{\\mathrm{CRB}}$ is difficult to obtain due to the factional expression of the CRLB. 
We note that the performance gain of pinching antennas over conventional antennas can also be illustrated by simply focusing on the user which is located at $\\left(\\frac{D_{\\mathrm{L}}}{2},0,0\\right)$ . The use of conventional antennas can achieve the following CRLB:", + "bbox": [ + 501, + 66, + 921, + 220 + ], + "page_idx": 1 + }, + { + "type": "equation", + "text": "\n$$\n\\begin{array}{l} \\mathrm {C R B} _ {m} ^ {\\mathrm {C o n v}} = \\left(\\frac {1}{\\sum_ {n = 1} ^ {N} \\frac {\\left(\\frac {D _ {\\mathrm {L}}}{2} - x _ {n} ^ {\\mathrm {C o n v}}\\right) ^ {2}}{\\left(\\left(\\frac {D _ {\\mathrm {L}}}{2} - x _ {n} ^ {\\mathrm {C o n v}}\\right) ^ {2} + (y _ {n} ^ {\\mathrm {C o n v}}) ^ {2} + d _ {\\mathrm {H}} ^ {2}\\right) ^ {2}}} \\right. \\\\ \\left. + \\frac {1}{\\sum_ {n = 1} ^ {N} \\frac {\\left(y _ {n} ^ {\\text {C o n v}}\\right) ^ {2}}{\\left(\\left(\\frac {D _ {\\mathrm {L}}}{2} - x _ {n} ^ {\\text {C o n v}}\\right) ^ {2} + \\left(y _ {n} ^ {\\text {C o n v}}\\right) ^ {2} + d _ {\\mathrm {H}} ^ {2}\\right) ^ {2}}}\\right) \\frac {K _ {E}}{\\left(2 K _ {E} + 1\\right)} \\\\ \\stackrel {(a)} {\\approx} \\frac {K _ {E}}{(2 K _ {E} + 1)} \\left(\\frac {4 \\left(\\frac {D _ {\\mathrm {L}} ^ {2}}{4} + d _ {\\mathrm {H}} ^ {2}\\right) ^ {2}}{N D _ {\\mathrm {L}} ^ {2}} + \\frac {\\left(\\frac {D _ {\\mathrm {L}} ^ {2}}{4} + d _ {\\mathrm {H}} ^ {2}\\right) ^ {2}}{\\sum_ {n = 1} ^ {N} (y _ {n} ^ {\\mathrm {C o n v}}) ^ {2}}\\right) \\\\ \\xrightarrow {(b)} \\infty , \\tag {10} \\\\ \\end{array}\n$$\n", + "text_format": "latex", + "bbox": [ + 526, + 227, + 919, + 468 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "where step (a) is due to the fact that the conventional antennas are clustered close to the center of the service area, and step (b) is due to the fact that $|y_{n}^{\\mathrm{Conv}}| \\to 0$ for conventional antennas, particularly for the case with high carrier frequencies (i.e., small wavelengths).", + "bbox": [ + 501, + 478, 
+ 919, + 553 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "On the other hand, pinching antennas do not suffer the singularity issue experienced by conventional antennas. For example, for the user located at $\\left(\\frac{D_{\\mathrm{L}}}{2},0,0\\right)$ , the corresponding CRLB can be expressed as follows:", + "bbox": [ + 503, + 554, + 921, + 614 + ], + "page_idx": 1 + }, + { + "type": "equation", + "text": "\n$$\n\\begin{array}{l} \\mathrm {C R B} _ {m} = \\frac {K _ {E}}{\\left(2 K _ {E} + 1\\right)} \\left(\\frac {1}{\\sum_ {n = 1} ^ {N} \\frac {\\left(\\frac {D _ {\\mathrm {L}}}{2} - x _ {n} ^ {\\mathrm {P i n}}\\right) ^ {2}}{\\left(\\left(\\frac {D _ {\\mathrm {L}}}{2} - x _ {n} ^ {\\mathrm {P i n}}\\right) ^ {2} + (y _ {n} ^ {\\mathrm {P i n}}) ^ {2} + d _ {\\mathrm {H}} ^ {2}\\right) ^ {2}}} \\right. \\\\ \\left. + \\frac {1}{\\sum_ {n = 1} ^ {N} \\frac {\\left(y _ {n} ^ {\\mathrm {P i n}}\\right) ^ {2}}{\\left(\\left(\\frac {D _ {\\mathrm {L}}}{2} - x _ {n} ^ {\\mathrm {P i n}}\\right) ^ {2} + \\left(y _ {n} ^ {\\mathrm {P i n}}\\right) ^ {2} + d _ {\\mathrm {H}} ^ {2}\\right) ^ {2}}}\\right). \\tag {11} \\\\ \\end{array}\n$$\n", + "text_format": "latex", + "bbox": [ + 511, + 623, + 919, + 782 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "For illustrative purposes, a simple upper bound on the CRLB achieved by pinching antennas can be obtained as follows:", + "bbox": [ + 503, + 789, + 919, + 821 + ], + "page_idx": 1 + }, + { + "type": "equation", + "text": "\n$$\n\\begin{array}{l} \\mathrm {C R B} _ {m} \\leq \\frac {K _ {E}}{\\left(2 K _ {E} + 1\\right)} \\left(\\frac {\\left(\\left(\\frac {D _ {\\mathrm {L}}}{2} - x _ {n} ^ {\\operatorname {P i n}}\\right) ^ {2} + \\left(y _ {n} ^ {\\operatorname {P i n}}\\right) ^ {2} + d _ {\\mathrm {H}} ^ {2}\\right) ^ {2}}{\\left(\\frac {D _ {\\mathrm {L}}}{2} - x _ {n} ^ {\\operatorname {P i n}}\\right) ^ {2}} \\right. \\\\ \\left. 
+ \\frac {\\left(\\left(\\frac {D _ {\\mathrm {L}}}{2} - x _ {n} ^ {\\text {P i n}}\\right) ^ {2} + \\left(y _ {n} ^ {\\text {P i n}}\\right) ^ {2} + d _ {\\mathrm {H}} ^ {2}\\right) ^ {2}}{\\left(y _ {n} ^ {\\text {P i n}}\\right) ^ {2}}\\right), \\tag {12} \\\\ \\end{array}\n$$\n", + "text_format": "latex", + "bbox": [ + 511, + 828, + 919, + 941 + ], + "page_idx": 1 + }, + { + "type": "page_number", + "text": "2", + "bbox": [ + 911, + 30, + 919, + 40 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "where $n$ is an arbitrary integer between 1 and $N$ . Because of the diverse locations of the $N$ pinching antennas, it is always possible to find $n \\in \\{1, \\dots, N\\}$ which yields a finite value for the upper bound shown in (12), i.e., the CRLB achieved by pinching antennas is always bounded.", + "bbox": [ + 73, + 68, + 491, + 145 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Remark 1: Unlike conventional antennas which can cause noticeable accuracy variations between users, the carried-out case study shows that pinching antennas have the ability to offer uniform positioning accuracy between the users.", + "bbox": [ + 73, + 145, + 491, + 205 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "2) Flexible User-Centric Positioning: Due to their low-cost and reconfigurability features, the locations of pinching antennas can be tailored to a serving user for realizing flexible user-centric positioning. To facilitate the performance analysis, the association between the pinching antennas and the waveguides is required. Without loss of generality, assume that there are $\\tilde{N} = \\frac{N}{N_{\\mathrm{WG}}}$ pinching antennas on each waveguide. Denote the location of the $n$ -th antenna on the $i$ -th waveguide by $\\psi_{in}^{\\mathrm{Pin}} = (x_{in}^{\\mathrm{Pin}},y_{in}^{\\mathrm{Pin}},d_{\\mathrm{H}})$ . 
Furthermore, assume that the antennas are equally spaced, and define $\\Delta_x = |x_{in}^{\\mathrm{Pin}} - x_{im}^{\\mathrm{Pin}}|$ and $\\Delta_y = |y_{in}^{\\mathrm{Pin}} - y_{jn}^{\\mathrm{Pin}}|$ , $m\\neq n$ and $i\\neq j$ .", + "bbox": [ + 73, + 205, + 491, + 372 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "For illustrative purposes, assume that all $N$ pinching antennas are activated in a square area with $\\mathrm{U}_m$ at its center, where $\\tilde{N} = N_{\\mathrm{WG}}$ and $\\Delta_x = \\Delta_y = \\Delta$ . This assumption is made to facilitate the performance analysis, and more practical setups will be considered in the simulation section. Define $\\bar{N} = \\frac{\\tilde{N}}{2}$ , and without loss of generality, assume that $\\bar{N}$ is an even number.", + "bbox": [ + 73, + 371, + 491, + 476 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "With these assumptions, the CRLB in (8) can be simplified as follows:", + "bbox": [ + 73, + 478, + 491, + 508 + ], + "page_idx": 2 + }, + { + "type": "equation", + "text": "\n$$\n\\mathrm {C R B} _ {m} = \\frac {\\frac {K _ {E} \\Delta^ {2}}{4 (2 K _ {E} + 1)}}{\\sum_ {i = 1} ^ {\\bar {N}} \\sum_ {n = 1} ^ {\\bar {N}} \\frac {(n - \\frac {1}{2}) ^ {2}}{\\beta_ {n i} ^ {2}}} + \\frac {\\frac {K _ {E} \\Delta^ {2}}{4 (2 K _ {E} + 1)}}{\\sum_ {i = 1} ^ {\\bar {N}} \\sum_ {n = 1} ^ {\\bar {N}} \\frac {(i - \\frac {1}{2}) ^ {2}}{\\beta_ {n i} ^ {2}}},\n$$\n", + "text_format": "latex", + "bbox": [ + 94, + 515, + 472, + 566 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "where $\\beta_{ni} = \\left(n - \\frac{1}{2}\\right)^{2} + \\left(i - \\frac{1}{2}\\right)^{2} + \\frac{d_{\\mathrm{H}}^{2}}{\\Delta^{2}}$ . The above CRLB can be used to design the antenna placement, i.e., the optimal choice of $\\Delta$ for minimizing the CRLB. 
Computer simulations can be used to verify that $\\frac{\\partial^2\\mathrm{CRB}_m}{\\partial\\Delta^2} > 0$ , i.e., $\\mathrm{CRB}_m$ is a convex function of $\\Delta$ , and hence convex optimization solvers can be used to find the optimal solution of $\\Delta$ efficiently. To obtain an insightful understanding of the optimal choice of $\\Delta$ , a special case with $N = 4$ is focused on in the following. We note that this special case is important in practice, given the fact that using a small number of antennas is helpful in reducing system overhead. For the case with $N = 4$ , the CRLB can be simplified as follows:", + "bbox": [ + 73, + 577, + 491, + 763 + ], + "page_idx": 2 + }, + { + "type": "equation", + "text": "\n$$\n\\mathrm {C R B} _ {m} = \\frac {2 K _ {E} \\Delta^ {2}}{\\left(2 K _ {E} + 1\\right)} \\left(\\frac {1}{2} + \\frac {d _ {\\mathrm {H}} ^ {2}}{\\Delta^ {2}}\\right) ^ {2}, \\tag {13}\n$$\n", + "text_format": "latex", + "bbox": [ + 156, + 772, + 491, + 809 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "whose first-order derivative is given by", + "bbox": [ + 73, + 816, + 344, + 832 + ], + "page_idx": 2 + }, + { + "type": "equation", + "text": "\n$$\n\\frac {\\partial \\mathrm {C R B} _ {m}}{\\partial \\Delta} = \\frac {4 K _ {E}}{(2 K _ {E} + 1)} \\left(\\frac {1}{2} \\Delta + \\frac {d _ {\\mathrm {H}} ^ {2}}{\\Delta}\\right) \\left(\\frac {1}{2} - \\frac {d _ {\\mathrm {H}} ^ {2}}{\\Delta^ {2}}\\right). 
\\tag {14}\n$$\n", + "text_format": "latex", + "bbox": [ + 94, + 840, + 491, + 876 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "The second-order derivative of $\\mathrm{CRB}_m$ is given by", + "bbox": [ + 73, + 883, + 419, + 898 + ], + "page_idx": 2 + }, + { + "type": "equation", + "text": "\n$$\n\\frac {\\partial^ {2} \\mathrm {C R B} _ {m}}{\\partial \\Delta^ {2}} = \\frac {4 K _ {E}}{\\left(2 K _ {E} + 1\\right)} \\left(\\frac {1}{4} + 3 \\frac {d _ {\\mathrm {H}} ^ {4}}{\\Delta^ {4}}\\right) > 0, \\tag {15}\n$$\n", + "text_format": "latex", + "bbox": [ + 135, + 907, + 491, + 941 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "which means that $\\mathrm{CRB}_m$ is a convex function of $\\Delta$ . Therefore, the optimal solution of $\\Delta$ for minimizing the CRLB for the special case with $N = 4$ is given by", + "bbox": [ + 503, + 68, + 921, + 114 + ], + "page_idx": 2 + }, + { + "type": "equation", + "text": "\n$$\n\\Delta^ {*} = \\sqrt {2} d _ {H}. \\tag {16}\n$$\n", + "text_format": "latex", + "bbox": [ + 665, + 125, + 921, + 142 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Remark 2: An intuition is that the CRLB is minimized if all the antennas are placed as close to the user as possible, i.e., $\\Delta^{*} \\to 0$ (or $\\frac{\\lambda}{2}$ to avoid antenna coupling). (16) shows that this intuition is wrong, where the optimal antenna spacing is a function of the height of the waveguides.", + "bbox": [ + 503, + 152, + 921, + 228 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "3) Local-Maximum Property of CRLB: In the proximity of each pinching antenna, $\\psi_{n}^{\\mathrm{Pin}}$ , there exists a local maximum of $\\mathrm{CRB}_m$ shown in (8). This local-maximum property can be revealed by studying $\\frac{\\partial\\mathrm{CRB}_m}{\\partial x_m}$ and $\\frac{\\partial\\mathrm{CRB}_m}{\\partial y_m}$ . 
Without loss of generality, $\\frac{\\partial\\mathrm{CRB}_m}{\\partial x_m}$ is focused, and can be expressed as follows:", + "bbox": [ + 503, + 228, + 921, + 309 + ], + "page_idx": 2 + }, + { + "type": "equation", + "text": "\n$$\n\\frac {\\partial \\mathrm {C R B} _ {m}}{\\partial x _ {m}} = \\frac {K _ {E}}{(2 K _ {E} + 1)} \\left(- \\frac {1}{\\gamma_ {1} ^ {2}} [ \\gamma_ {2} - \\gamma_ {3} ] + \\frac {1}{\\gamma_ {4} ^ {2}} \\gamma_ {5}\\right), \\tag {17}\n$$\n", + "text_format": "latex", + "bbox": [ + 524, + 316, + 921, + 352 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "where $d_{mn}^2 = \\left(x_m - x_n^{\\mathrm{Pin}}\\right)^2 +\\left(y_m - y_n^{\\mathrm{Pin}}\\right)^2 +d_{\\mathrm{H}}^2,$ $\\gamma_{1} = \\sum_{n = 1}^{N}\\frac{\\left(x_{m} - x_{n}^{\\mathrm{Pin}}\\right)^{2}}{d_{mn}^{4}},\\gamma_{2} = \\sum_{n = 1}^{N}\\frac{2\\left(x_{m} - x_{n}^{\\mathrm{Pin}}\\right)}{d_{mn}^{4}},\\gamma_{3} =$ $\\begin{array}{r}\\sum_{n = 1}^{N}\\frac{4\\left(x_m - x_n^{\\mathrm{Pin}}\\right)^3}{d_{mn}^6},\\gamma_4 = \\sum_{n = 1}^{N}\\frac{\\left(y_m - y_n^{\\mathrm{Pin}}\\right)^2}{d_{mn}^2},\\mathrm{and}\\gamma_5 = \\end{array}$ $\\begin{array}{r}\\sum_{n = 1}^{N}\\frac{4\\left(x_m - x_n^{\\mathrm{Pin}}\\right)\\left(y_m - y_n^{\\mathrm{Pin}}\\right)^2}{d_{mn}^6}. \\end{array}$", + "bbox": [ + 503, + 361, + 921, + 452 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Without loss of generality, assume that $\\mathrm{U}_m$ is in the proximity of the first pinching antenna on the first waveguide, i.e., $x_{m} = x_{11}^{\\mathrm{Pin}} + \\delta_{x}$ and $y_{m} = y_{11}^{\\mathrm{Pin}} + \\delta_{y}$ , where $\\delta_x\\to 0$ and $\\delta_y\\rightarrow 0$ . 
In this case, $\\gamma_{1}$ in (17) can be approximated as follows:", + "bbox": [ + 503, + 452, + 921, + 525 + ], + "page_idx": 2 + }, + { + "type": "equation", + "text": "\n$$\n\\gamma_ {1} \\approx \\sum_ {i = 1} ^ {\\frac {N}{N}} \\sum_ {n = 1} ^ {\\tilde {N} - 1} \\frac {n ^ {2} \\Delta_ {x} ^ {2}}{\\left(n ^ {2} \\Delta_ {x} ^ {2} + (i - 1) ^ {2} \\Delta_ {y} ^ {2} + d _ {\\mathrm {H}} ^ {2}\\right) ^ {2}}, \\tag {18}\n$$\n", + "text_format": "latex", + "bbox": [ + 560, + 534, + 921, + 580 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "where the terms at the order of $\\delta_x^2$ are omitted. Similarly, by omitting the terms of $\\delta_x^2$ , $\\gamma_2$ can be approximated as follows:", + "bbox": [ + 503, + 590, + 919, + 623 + ], + "page_idx": 2 + }, + { + "type": "equation", + "text": "\n$$\n\\begin{array}{l} \\gamma_ {2} \\approx \\sum_ {i = 1} ^ {\\frac {N}{N}} \\frac {2 \\delta_ {x}}{\\left(\\delta^ {2} + (i - 1) ^ {2} \\Delta_ {y} ^ {2} + d _ {\\mathrm {H}} ^ {2}\\right) ^ {2}} \\tag {19} \\\\ - \\sum_ {i = 1} ^ {\\frac {N}{N}} \\sum_ {n = 1} ^ {\\tilde {N} - 1} \\frac {2 n \\Delta_ {x}}{\\left(n ^ {2} \\Delta_ {x} ^ {2} + (i - 1) ^ {2} \\Delta_ {y} ^ {2} + d _ {\\mathrm {H}} ^ {2}\\right) ^ {2}}. 
\\\\ \\end{array}\n$$\n", + "text_format": "latex", + "bbox": [ + 552, + 632, + 919, + 728 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Similarly, $\\gamma_3, \\gamma_4$ and $\\gamma_5$ can be approximated as follows:", + "bbox": [ + 504, + 736, + 893, + 752 + ], + "page_idx": 2 + }, + { + "type": "equation", + "text": "\n$$\n\\gamma_ {3} \\approx - \\sum_ {i = 1} ^ {\\frac {N}{N}} \\sum_ {n = 1} ^ {\\tilde {N} - 1} \\frac {4 n ^ {3} \\Delta_ {x} ^ {3}}{\\left(n ^ {2} \\Delta_ {x} ^ {2} + (i - 1) ^ {2} \\Delta_ {y} ^ {2} + d _ {\\mathrm {H}} ^ {2}\\right) ^ {3}}, \\tag {20}\n$$\n", + "text_format": "latex", + "bbox": [ + 539, + 761, + 919, + 808 + ], + "page_idx": 2 + }, + { + "type": "equation", + "text": "\n$$\n\\gamma_ {4} \\approx \\sum_ {i = 1} ^ {\\frac {N}{N} - 1} \\sum_ {n = 1} ^ {\\tilde {N}} \\frac {i ^ {2} \\Delta_ {y} ^ {2}}{\\left((n - 1) ^ {2} \\Delta_ {x} ^ {2} + i ^ {2} \\Delta_ {y} ^ {2} + d _ {\\mathrm {H}} ^ {2}\\right) ^ {2}}, \\tag {21}\n$$\n", + "text_format": "latex", + "bbox": [ + 539, + 810, + 919, + 857 + ], + "page_idx": 2 + }, + { + "type": "equation", + "text": "\n$$\n\\gamma_ {5} \\approx - \\sum_ {i = 1} ^ {\\frac {N}{N} - 1} \\sum_ {n = 1} ^ {\\tilde {N} - 1} \\frac {n \\Delta_ {x} i ^ {2} \\Delta_ {y} ^ {2}}{\\left(n ^ {2} \\Delta_ {x} ^ {2} + i \\Delta_ {y} ^ {2} + d _ {\\mathrm {H}} ^ {2}\\right) ^ {3}}. 
\\tag {22}\n$$\n", + "text_format": "latex", + "bbox": [ + 539, + 859, + 919, + 905 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "To facilitate the analysis of this local-maximum property of CRLB, assume that $\\Delta_x = \\Delta_y = \\Delta \\gg d_{\\mathrm{H}}$ and $\\tilde{N} = \\frac{N}{\\tilde{N}}$ , which", + "bbox": [ + 503, + 914, + 921, + 948 + ], + "page_idx": 2 + }, + { + "type": "page_number", + "text": "3", + "bbox": [ + 911, + 30, + 919, + 40 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "means that $\\gamma_{1} = \\gamma_{3}$ , and hence the CRLB can be simplified as follows:", + "bbox": [ + 73, + 69, + 491, + 98 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\n\\begin{array}{l} \\frac {\\partial \\mathrm {C R B} _ {m}}{\\partial x _ {m}} \\approx \\frac {\\gamma_ {1} K _ {E}}{(2 K _ {E} + 1)} \\left[ - \\sum_ {i = 1} ^ {\\frac {N}{N}} \\frac {2 \\delta_ {x}}{(\\delta^ {2} + (i - 1) ^ {2} \\Delta^ {2} + d _ {\\mathrm {H}} ^ {2}) ^ {2}} \\right. \\\\ \\left. 
+ \\sum_ {i = 1} ^ {\\frac {N}{N}} \\sum_ {n = 1} ^ {\\tilde {N} - 1} \\frac {2 n \\Delta}{\\bar {\\beta} _ {n i} ^ {2}} - \\sum_ {i = 1} ^ {\\frac {N}{N}} \\sum_ {n = 1} ^ {\\tilde {N} - 1} \\frac {4 n ^ {3} \\Delta^ {3}}{\\bar {\\beta} _ {n i} ^ {3}} - \\sum_ {i = 1} ^ {\\frac {N}{N} - 1} \\sum_ {n = 1} ^ {\\tilde {N} - 1} \\frac {4 n ^ {3} \\Delta^ {3}}{\\bar {\\beta} _ {n i} ^ {3}} \\right], \\\\ \\end{array}\n$$\n", + "text_format": "latex", + "bbox": [ + 84, + 101, + 480, + 202 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "where $\\bar{\\beta}_{ni} = (n^2 + (i - 1)^2)\\Delta^2 + d_{\\mathrm{H}}^2$", + "bbox": [ + 73, + 204, + 338, + 222 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Note that if $i = \\frac{N}{N}$ , $\\sum_{n=1}^{\\tilde{N}-1} \\frac{4n^3\\Delta^3}{\\left((n^2+(i-1)^2)\\Delta^2+d_{\\mathrm{H}}^2\\right)^3}$ is an insignificant term, which means that the CRLB can be further simplified as follows:", + "bbox": [ + 73, + 220, + 491, + 273 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\n\\begin{array}{l} \\frac {\\partial \\mathrm {C R B} _ {m}}{\\partial x _ {m}} \\approx \\frac {\\gamma_ {1} K _ {E}}{(2 K _ {E} + 1)} \\left[ - \\sum_ {i = 1} ^ {\\frac {N}{N}} \\frac {2 \\delta_ {x}}{(\\delta^ {2} + (i - 1) ^ {2} \\Delta^ {2} + d _ {\\mathrm {H}} ^ {2}) ^ {2}} \\right. \\\\ \\left. + 2 \\Delta \\sum_ {i = 1} ^ {\\frac {N}{N}} \\sum_ {n = 1} ^ {\\tilde {N} - 1} \\frac {n \\left(\\left((i - 1) ^ {2} - 3 n ^ {2}\\right) \\Delta^ {2} + d _ {\\mathrm {H}} ^ {2}\\right)}{\\left(\\left(n ^ {2} + (i - 1) ^ {2}\\right) \\Delta^ {2} + d _ {\\mathrm {H}} ^ {2}\\right) ^ {3}} \\right]. 
\\tag {23} \\\\ \\end{array}\n$$\n", + "text_format": "latex", + "bbox": [ + 84, + 277, + 490, + 380 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "For the case with $\\delta_x = 0$ , i.e., the user is located right underneath the pinching antenna at $\\psi_{11}^{\\mathrm{Pin}}$ , by using the assumption that $\\Delta \\gg d_{\\mathrm{H}}$ , the CRLB can be expressed as follows:", + "bbox": [ + 73, + 382, + 491, + 441 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\n\\frac {\\partial \\mathrm {C R B} _ {m}}{\\partial x _ {m}} \\approx \\frac {\\gamma_ {1} K _ {E}}{(2 K _ {E} + 1)} \\frac {2}{\\Delta^ {3}} \\gamma_ {6}, \\tag {24}\n$$\n", + "text_format": "latex", + "bbox": [ + 179, + 441, + 488, + 476 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "where $\\gamma_6 = \\sum_{i=1}^{N} \\sum_{n=1}^{\\tilde{N}-1} \\frac{(i-1)^2 - 3n^2}{(n^2 + (i-1)^2)^3}$ . We note that the terms of $\\gamma_6$ decay rapidly by increasing $n$ and $i$ , i.e., $\\gamma_6$ can be approximated by keeping the dominant negative term ( $n = 1$ and $i = 1$ ) and the dominant positive term ( $n = 1$ and $i = 3$ ), i.e., $\\gamma_6 \\approx -3 + \\frac{1}{125}$ , which means $\\frac{\\partial \\mathrm{CRB}_m}{\\partial x_m} \\leq 0$ for the case with $\\delta_x = 0$ . 
For the case of $\\delta_x \\neq 0$ , the CRLB can be approximated as follows:", + "bbox": [ + 73, + 479, + 491, + 592 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\n\\frac {\\partial \\mathrm {C R B} _ {m}}{\\partial x _ {m}} \\approx \\frac {\\gamma_ {1} K _ {E}}{(2 K _ {E} + 1)} \\left[ - \\frac {2 \\delta_ {x}}{d _ {\\mathrm {H}} ^ {4}} + \\frac {2}{\\Delta^ {3}} \\gamma_ {6} \\right].\n$$\n", + "text_format": "latex", + "bbox": [ + 138, + 595, + 426, + 630 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Due to the assumption of $\\Delta \\gg d_{\\mathrm{H}}$ , the term $\\frac{2\\delta_x}{d_{\\mathrm{H}}^4}$ is dominant, and hence $\\frac{\\partial\\mathrm{CRB}_m}{\\partial x_m} >0$ if $\\delta_{x} < 0$ . In summary, $\\frac{\\partial\\mathrm{CRB}_m}{\\partial x_m} < 0$ if the user's location is $(x_{11}^{\\mathrm{Pin}},y_{11}^{\\mathrm{Pin}},0)$ , and $\\frac{\\partial\\mathrm{CRB}_m}{\\partial x_m} >0$ if the user's location is $(x_{11}^{\\mathrm{Pin}} + \\delta_x,y_{11}^{\\mathrm{Pin}} + \\delta_y,d_{\\mathrm{H}})$ . A similar conclusion can be established to $\\frac{\\partial\\mathrm{CRB}_m}{\\partial y_m}$ , which means that there exists a local maximum for the CRLB around $\\psi_{n}^{\\mathrm{Pin}}$", + "bbox": [ + 73, + 633, + 491, + 733 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Remark 3: The local maximum property of the CRLB indicates an interesting conflict between the communication and sensing functionalities of pinching antennas. In particular, placing a pinching antenna directly above the user might increase the user's data rate but also degrade positioning accuracy. In other words, this local maximum property reveals the importance of antenna placement in pinching-antenna assisted ISAC networks.", + "bbox": [ + 73, + 734, + 491, + 854 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "IV. 
NUMERICAL STUDIES", + "text_level": 1, + "bbox": [ + 189, + 864, + 375, + 878 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "In this section, computer simulation results are presented to demonstrate the impact of pinching antennas on the positioning accuracy, where $K_{E} = 0.01$ , $D_{\\mathrm{W}} = 10 \\mathrm{~m}$ and $D_{\\mathrm{L}} = 40 \\mathrm{~m}$ , unless stated otherwise.", + "bbox": [ + 73, + 883, + 491, + 944 + ], + "page_idx": 3 + }, + { + "type": "image", + "img_path": "images/de2fd461e68d196df1294074a08a6e8f1ae03b5be4657fc9a3ee1fe1bcd21315.jpg", + "image_caption": [ + "Fig. 1. Averaged CRLBs, $\\mathrm{CRB}_m$ , achieved by the considered antenna systems, where $N_{\\mathrm{WG}} = 2$ and $d = 3\\mathrm{m}$ . For the pinching-antenna system, on each waveguide, there are $\\frac{N}{N_{\\mathrm{WG}}}$ antennas, which are equally spaced. Due to the singularity issue experienced by conventional antennas discussed in Section III-B1, users are assumed to be excluded from a square area with its side being $a$ and its center at the origin." + ], + "image_footnote": [], + "bbox": [ + 573, + 65, + 856, + 234 + ], + "page_idx": 3 + }, + { + "type": "image", + "img_path": "images/8c86d7c82be13cfdb700185fedbcfc043903bcacc188a31c96bd78c0d022b95d.jpg", + "image_caption": [ + "(a) Conventional Antennas" + ], + "image_footnote": [], + "bbox": [ + 570, + 321, + 839, + 468 + ], + "page_idx": 3 + }, + { + "type": "image", + "img_path": "images/1471639b0f119a70aea449f2c23ef35cea4d5de252869d12349b3c54fafbf1c3.jpg", + "image_caption": [ + "(b) Pinching Antennas", + "Fig. 2. CRLBs achieved by the considered antenna systems. $N = 20$ , $N_{\\mathrm{WG}} = 2$ and $d = 3 \\, \\mathrm{m}$ . On each waveguide, there are $\\frac{N}{N_{\\mathrm{WG}}}$ antennas, which are equally spaced." + ], + "image_footnote": [], + "bbox": [ + 573, + 507, + 836, + 652 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "In Fig. 
1, the averaged CRLBs achieved by the conventional and pinching-antenna systems are shown as functions of the number of antennas, where $\\mathrm{U}_m$ is assumed to be uniformly deployed in the service area. Because the conventional-antenna system suffers the singularity issue discussed in Section III-B1, it is assumed that $\\mathrm{U}_m$ cannot be located in a square area with its side being $a$ and its center at the origin. As can be seen from Fig. 1, the use of pinching antennas yields a significant performance gain over conventional antennas, regardless of the choices of $N$ and $a$ .", + "bbox": [ + 501, + 715, + 921, + 866 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Fig. 2 is provided to highlight the fact that a user's positioning accuracy depends on its location. On the one hand, Fig. 2(a) shows that for conventional antennas, a user can experience extremely poor positioning accuracy if it is located far away from the center of the service area, which", + "bbox": [ + 501, + 869, + 921, + 945 + ], + "page_idx": 3 + }, + { + "type": "page_number", + "text": "4", + "bbox": [ + 911, + 31, + 919, + 40 + ], + "page_idx": 3 + }, + { + "type": "image", + "img_path": "images/214063bb72dbd9a11d4eb4e79e473fdc96c3ca35a008baf61c81991492ed251a.jpg", + "image_caption": [ + "(a) Positioning with a focal point at $\\left(-\\frac{D_{\\mathrm{L}}}{4},0,0\\right)$" + ], + "image_footnote": [], + "bbox": [ + 145, + 51, + 411, + 196 + ], + "page_idx": 4 + }, + { + "type": "image", + "img_path": "images/5959a4483a3d708e1fda07cb15e0ba8ae7a2653fca446613aace2299d7205a6c.jpg", + "image_caption": [ + "(b) Positioning with a focal point at $\\left(\\frac{D_{\\mathrm{L}}}{4},0,0\\right)$", + "Fig. 3. Using pinching antennas to achieve flexible user-centric positioning. $N = 20$ , $N_{\\mathrm{WG}} = 2$ and $d = 3 \\mathrm{~m}$ . 
On each waveguide, there are $\\frac{N}{N_{\\mathrm{WG}}}$ antennas, which are equally spaced in a segment with its length being $\\frac{D_{\\mathrm{L}}}{2}$ and its center at the focal points shown in the figures." + ], + "image_footnote": [], + "bbox": [ + 143, + 239, + 406, + 385 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "confirms the analytical results shown in (10). On the other hand, Fig. 2(b) shows that the use of pinching antennas ensures reasonably accurate positioning, regardless of whether the user is at the center or the edge of the service area. This also means that for the multi-user scenario, using pinching antennas can ensure fairness for the users' positioning accuracy. We note that in Fig. 2(b), local maximums are clearly visible in the proximity of the pinching antennas, which confirms the analysis shown in Section III-B3.", + "bbox": [ + 73, + 465, + 490, + 599 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Recall that one key feature of pinching antennas is their reconfiguration capabilities, where the number and the locations of the antennas can be changed in a flexible manner. Fig. 3 demonstrates how this reconfiguration feature can be used to achieve flexible user-centric positioning. In particular, Figs. 3(a) and 3(b) show that by activating the pinching antennas close to the intended user locations, different focal points can be realized, which means that users close to these focal points can enjoy high positioning accuracy. For the case where the pinching antennas are clustered close to a user, Fig. 4 is provided to show the impact of the antenna spacing on the CRLB, where the accuracy of the analytical results developed in (16) is also verified.", + "bbox": [ + 73, + 601, + 491, + 796 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "V. 
CONCLUSIONS", + "text_level": 1, + "bbox": [ + 217, + 804, + 346, + 818 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "This letter investigated how the key features of pinching antennas can be used to support ISAC from the CRLB perspective. In particular, the CRLB achieved by pinching antennas was first derived and then compared to that of conventional antennas. The presented analytical and simulation results demonstrated that the use of pinching antennas can significantly reduce CRLB and, hence, enhance the sensing capability. In addition, this letter showed that the low-cost and", + "bbox": [ + 73, + 823, + 491, + 946 + ], + "page_idx": 4 + }, + { + "type": "image", + "img_path": "images/95e52804193e9fcc17868d063866468da9677490c21f4494ad8893fb6d18da17.jpg", + "image_caption": [ + "Fig. 4. Impact of the antenna spacing on the CRLB. $N = 4$ pinching antennas are activated in a square-shape area with the antenna spacing being $\\Delta$ and $\\mathrm{U}_m$ located at the center of the area, where $N_{\\mathrm{WG}} = 2$ . The analytical results are based on (16)." + ], + "image_footnote": [], + "bbox": [ + 573, + 40, + 857, + 210 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "reconfigurability features of pinching antennas can be utilized to realize flexible user-centric positioning.", + "bbox": [ + 503, + 268, + 921, + 299 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "REFERENCES", + "text_level": 1, + "bbox": [ + 663, + 306, + 761, + 318 + ], + "page_idx": 4 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[1] A. Fukuda, H. Yamamoto, H. Okazaki, Y. Suzuki, and K. Kawai, \"Pinching antenna - using a dielectric waveguide as an antenna,\" NTT DOCOMO Technical J., vol. 23, no. 3, pp. 5-12, Jan. 2022.", + "[2] Z. Ding, R. Schober, and H. V. Poor, \"Flexible-antenna systems: A pinching-antenna perspective,\" IEEE Trans. Commun., (to appear in 2025) Available on-line at arXiv:2412.02376.", + "[3] Z. Ding and H. 
V. Poor, “LoS blockage in pinching-antenna systems: Curse or blessing?” IEEE Wireless Commun. Lett., (submitted) Available on-line at arXiv:2503.08554.", + "[4] K. Wang, Z. Ding, and R. Schober, \"Antenna activation for NOMA assisted pinching-antenna systems,\" IEEE Commun. Lett., (to appear in 2025) Available on-line at arXiv:2412.13969.", + "[5] C. Ouyang, Z. Wang, Y. Liu, and Z. Ding, \"Array gain for pinching-antenna systems (PASS),\" IEEE Commun. Lett., (submitted) Available on-line at arXiv:2501.05657.", + "[6] Z. Wang, C. Ouyang, X. Mu, Y. Liu, and Z. Ding, \"Modeling and beamforming optimization for pinching-antenna systems,\" IEEE Trans. Wireless Commun., (submitted) Available on-line at arXiv:2502.05917.", + "[7] Y. Xu, Z. Ding, and G. Karagiannidis, \"Rate maximization for downlink pinching-antenna systems,\" IEEE Commun. Lett., (to appear in 2025) Available on-line at arXiv:2502.12629.", + "[8] X. Mu, G. Zhu, and Y. Liu, \"Pinching-antenna system (PASS)-enabled multicast communications,\" IEEE Trans. Commun., (submitted) Available on-line at arXiv:2502.16624.", + "[9] J. Xiao, J. Wang, and Y. Liu, \"Channel estimation for pinching-antenna systems (PASS),\" IEEE Trans. Commun., (submitted) Available on-line at arXiv:2503.13268.", + "[10] ——, “Beam training for pinching-antenna systems (PASS),” IEEE Trans. Wireless Commun., (submitted) Available on-line at arXiv:2502.05921.", + "[11] X. Xie, Y. Lu, and Z. Ding, \"Graph neural network enabled pinching antennas,\" IEEE Commun. Lett., (submitted) Available on-line at arXiv:2502.05447.", + "[12] J. Guo, Y. Liu, and A. Nallanathan, \"GPASS: Deep learning for beamforming in pinching-antenna systems (PASS),\" IEEE Commun. Lett., (submitted) Available on-line at arXiv:2502.01438.", + "[13] S. A. Tegos, P. D. Diamantoulakis, Z. Ding, and G. K. Karagiannidis, \"Minimum data rate maximization for uplink pinching-antenna systems,\" IEEE Wireless Commun. 
Lett., (to appear in 2025) Available on-line at arXiv:2412.13892.", + "[14] M. Sun, C. Ouyang, S. Wu, and Y. Liu, \"Physical layer security for pinching-antenna systems (PASS),\" IEEE Commun. Lett., (submitted) Available on-line at arXiv:2503.09075.", + "[15] Y. Qin, Y. Fu, and H. Zhang, \"Joint antenna position and transmit power optimization for pinching antenna-assisted ISAC systems,\" IEEE Commun. Lett., (submitted) Available on-line at arXiv:2503.12872.", + "[16] F. Liu, Y. Cui, C. Masouros, J. Xu, T. X. Han, Y. C. Eldar, and S. Buzzi, \"Integrated sensing and communications: Toward dual-functional wireless networks for 6G and beyond,\" IEEE J. Sel. Areas Commun., vol. 40, no. 6, pp. 1728-1767, 2022.", + "[17] T. Jia and R. M. Buehrer, “A new cramer-rao lower bound for TOA-based localization,” in Proc. Military Commun. Conf. (MILCOM 2008), Nov. 2008, pp. 1-5." + ], + "bbox": [ + 506, + 325, + 921, + 926 + ], + "page_idx": 4 + }, + { + "type": "page_number", + "text": "5", + "bbox": [ + 911, + 30, + 919, + 40 + ], + "page_idx": 4 + } +] \ No newline at end of file diff --git a/data/2025/2504_05xxx/2504.05792/2e1d882c-c0bf-4c6f-98d8-3ae7d8fb9f26_model.json b/data/2025/2504_05xxx/2504.05792/2e1d882c-c0bf-4c6f-98d8-3ae7d8fb9f26_model.json new file mode 100644 index 0000000000000000000000000000000000000000..599c7adb339f39884495d69f7d65e66bb6d591f8 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05792/2e1d882c-c0bf-4c6f-98d8-3ae7d8fb9f26_model.json @@ -0,0 +1,1442 @@ +[ + [ + { + "type": "page_number", + "bbox": [ + 0.912, + 0.031, + 0.921, + 0.041 + ], + "angle": 0, + "content": "1" + }, + { + "type": "title", + "bbox": [ + 0.125, + 0.042, + 0.873, + 0.07 + ], + "angle": 0, + "content": "Pinching-Antenna Assisted ISAC: A CRLB Perspective" + }, + { + "type": "text", + "bbox": [ + 0.391, + 0.08, + 0.6, + 0.096 + ], + "angle": 0, + "content": "Zhiguo Ding, Fellow, IEEE" + }, + { + "type": "text", + "bbox": [ + 0.075, + 0.115, + 0.493, + 0.282 + ], + "angle": 0, + 
"content": "Abstract—Recently, pinching antennas have attracted significant research interest due to their capability to reconfigure wireless channels as well as their array configuration flexibility. This letter focuses on how these features can be used to support integrated sensing and communications (ISAC) from the Cramér-Rao lower bound (CRLB) perspective. In particular, the CRLB achieved by pinching antennas is first derived and then compared to that of conventional antennas. The presented analytical and simulation results demonstrate that using pinching antennas can significantly reduce CRLB and, hence, enhance positioning accuracy. In addition, this letter also reveals that the low-cost and reconfigurability features of pinching antennas can be utilized to realize flexible user-centric positioning." + }, + { + "type": "text", + "bbox": [ + 0.075, + 0.284, + 0.492, + 0.323 + ], + "angle": 0, + "content": "Index Terms—Pinching antennas, integrated sensing and communications (ISAC), Cramér-Rao lower bound (CRLB), estimation theory." + }, + { + "type": "title", + "bbox": [ + 0.217, + 0.327, + 0.35, + 0.34 + ], + "angle": 0, + "content": "I. INTRODUCTION" + }, + { + "type": "text", + "bbox": [ + 0.074, + 0.347, + 0.492, + 0.542 + ], + "angle": 0, + "content": "Recently, pinching antennas have received significant attention from both academia and industry as a novel evolution of smart antennas, and offer three distinguished features [1], [2]. One is their capability to create strong line-of-sight (LoS) links between the transceivers, which means that large-scale path losses and LoS blockage can be effectively mitigated by activating antennas close to users [3]. The second feature is the reconfigurability of pinching-antenna systems, where the topology of a pinching-antenna array, e.g., the locations and the number of pinching antennas, can be flexibly adjusted. 
The third feature is their practicality, where DOCOMO's prototype shows that pinching antennas can be straightforwardly implemented in a low-cost manner [1]." + }, + { + "type": "text", + "bbox": [ + 0.074, + 0.543, + 0.493, + 0.845 + ], + "angle": 0, + "content": "In the literature, there already exists a large amount of work to demonstrate that the use of pinching antennas can significantly enhance the communication functionality of wireless networks. For example, the fundamental issues of pinching antennas, such as antenna activation, the architecture of a pinching-antenna array, and the array gains, have been investigated in [4]–[6]. Antenna placement is key to realizing the full potential of pinching-antenna systems, where various designs and their impact on the system throughput have been investigated in [7], [8]. Channel estimation and beam training are crucial issues to pinching-antenna systems, and sophisticated designs using the flexibility features of pinching antennas have been developed in [9], [10]. For many resource allocation problems encountered in pinching-antenna systems, the use of conventional convex optimization leads to high computational complexity, which motivates the application of advanced learning methods [11], [12]. The applications of pinching antennas to improve the uplink throughput and the security of communication networks have also been recently investigated in [13], [14]." + }, + { + "type": "text", + "bbox": [ + 0.074, + 0.846, + 0.492, + 0.907 + ], + "angle": 0, + "content": "However, we note that the impact of pinching antennas on the sensing functionality of wireless networks has not yet been fully characterized in the literature, although the recent work in [15] demonstrated the importance of pinching" + }, + { + "type": "text", + "bbox": [ + 0.075, + 0.908, + 0.492, + 0.933 + ], + "angle": 0, + "content": "Z. Ding is with the University of Manchester, Manchester, M1 9BB, UK, and Khalifa University, Abu Dhabi, UAE." 
+ }, + { + "type": "text", + "bbox": [ + 0.503, + 0.115, + 0.925, + 0.373 + ], + "angle": 0, + "content": "antennas in integrated sensing and communication (ISAC) systems [16], which motivates this letter. In particular, in this letter, the Cramér-Rao lower bound (CRLB) is used as the performance metric to characterize the capability of pinching antennas for enhancing the positioning accuracy of ISAC networks. The CRLB achieved by pinching antennas is first derived in the letter, and then compared to conventional antennas. The presented analytical results reveal that the use of pinching antennas can ensure that users at different locations experience uniform positioning accuracy, whereas the use of conventional antennas can result in a significant disparity in accuracy among the users. In addition, the important properties of CRLB achieved by pinching antennas, such as the effects of antenna placement and the local maximums of CRLB, are also investigated. Furthermore, this letter also reveals that the low-cost and reconfigurability features of pinching antennas can be utilized to realize flexible user-centric positioning." + }, + { + "type": "title", + "bbox": [ + 0.641, + 0.389, + 0.787, + 0.403 + ], + "angle": 0, + "content": "II. SYSTEM MODEL" + }, + { + "type": "text", + "bbox": [ + 0.503, + 0.413, + 0.922, + 0.565 + ], + "angle": 0, + "content": "Consider a pinching-antenna system that is deployed to provide ISAC services to \\(M\\) single-antenna users, denoted by \\(\\mathrm{U}_m\\). Given the fact that there is already a rich literature on using pinching antennas to enhance communications, and also due to space limitations, the impact of pinching antennas on the sensing functionality is focused on in this letter. Without loss of generality, assume that \\(N\\) pinching antennas are activated on \\(N_{\\mathrm{WG}}\\) waveguides. 
The location of the \\(n\\)-th pinching antenna is denoted by \\(\\psi_n^{\\mathrm{Pin}} = (x_n^{\\mathrm{Pin}}, y_n^{\\mathrm{Pin}}, d_{\\mathrm{H}})\\), where \\(d_{\\mathrm{H}}\\) denotes the height of the waveguides." + }, + { + "type": "text", + "bbox": [ + 0.504, + 0.566, + 0.922, + 0.642 + ], + "angle": 0, + "content": "The service area is denoted by \\(\\mathcal{A}\\) and is assumed to be a rectangle with its two sides denoted by \\(D_{\\mathrm{W}}\\) and \\(D_{\\mathrm{L}}\\), respectively, and its center located at \\((0,0,0)\\). The users are assumed to be uniformly distributed in \\(\\mathcal{A}\\), and \\(\\mathrm{U}_m\\)'s location is denoted by \\(\\psi_m = (x_m,y_m,0)\\)." + }, + { + "type": "text", + "bbox": [ + 0.504, + 0.642, + 0.922, + 0.687 + ], + "angle": 0, + "content": "Denote the distance from the \\(n\\)-th pinching antenna to the \\(m\\)-th user by \\(d_{mn}\\). Distance (range) estimates for the \\(m\\)-th user can be modeled as follows: [17]" + }, + { + "type": "equation", + "bbox": [ + 0.641, + 0.697, + 0.921, + 0.715 + ], + "angle": 0, + "content": "\\[\n\\hat {d} _ {m n} = d _ {m n} + w _ {m n}, \\tag {1}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.504, + 0.728, + 0.922, + 0.78 + ], + "angle": 0, + "content": "where \\( d_{mn} = \\sqrt{(x_m - x_n^{\\mathrm{Pin}})^2 + (y_m - y_n^{\\mathrm{Pin}})^2 + d_{\\mathrm{H}}^2} \\), and \\( w_{mn} \\) is a zero-mean Gaussian distributed noise term whose variance is distance-dependent, i.e.," + }, + { + "type": "equation", + "bbox": [ + 0.525, + 0.788, + 0.921, + 0.813 + ], + "angle": 0, + "content": "\\[\n\\sigma_ {m n} ^ {2} = K _ {E} \\left(\\left(x _ {m} - x _ {n} ^ {\\mathrm {P i n}}\\right) ^ {2} + \\left(y _ {m} - y _ {n} ^ {\\mathrm {P i n}}\\right) ^ {2} + d _ {\\mathrm {H}} ^ {2}\\right), \\tag {2}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.504, + 0.821, + 0.921, + 0.851 + ], + "angle": 0, + "content": "\\(K_{E}\\) denotes a system parameter decided by the range estimation 
environment." + }, + { + "type": "title", + "bbox": [ + 0.516, + 0.872, + 0.908, + 0.886 + ], + "angle": 0, + "content": "III. IMPACT OF PINCHING ANTENNAS ON POSITIONING" + }, + { + "type": "title", + "bbox": [ + 0.504, + 0.892, + 0.842, + 0.908 + ], + "angle": 0, + "content": "A. CRLB Achieved by Pinching-Antenna Systems" + }, + { + "type": "text", + "bbox": [ + 0.504, + 0.915, + 0.922, + 0.946 + ], + "angle": 0, + "content": "Without loss of generality, the impact of pinching antennas on \\(\\mathrm{U}_m\\) 's localization is focused on. The joint probability den" + }, + { + "type": "aside_text", + "bbox": [ + 0.023, + 0.256, + 0.059, + 0.678 + ], + "angle": 270, + "content": "arXiv:2504.05792v1 [cs.IT] 8 Apr 2025" + } + ], + [ + { + "type": "page_number", + "bbox": [ + 0.912, + 0.031, + 0.921, + 0.041 + ], + "angle": 0, + "content": "2" + }, + { + "type": "text", + "bbox": [ + 0.075, + 0.068, + 0.493, + 0.1 + ], + "angle": 0, + "content": "sity function (pdf) of \\(\\hat{d}_{mn}\\) conditioned on \\(d_{mn}\\), \\(1\\leq n\\leq N\\), is given by" + }, + { + "type": "equation", + "bbox": [ + 0.103, + 0.106, + 0.493, + 0.148 + ], + "angle": 0, + "content": "\\[\nf (\\hat {d} _ {m 1}, \\dots , \\hat {d} _ {m N}) = \\prod_ {n = 1} ^ {N} \\frac {1}{\\sqrt {2 \\pi \\sigma_ {m n} ^ {2}}} e ^ {- \\frac {(\\hat {d} _ {m n} - d _ {m n}) ^ {2}}{2 \\sigma_ {m n} ^ {2}}}, \\tag {3}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.152, + 0.361, + 0.168 + ], + "angle": 0, + "content": "whose log-likelihood function is given by" + }, + { + "type": "equation", + "bbox": [ + 0.143, + 0.174, + 0.49, + 0.245 + ], + "angle": 0, + "content": "\\[\n\\begin{array}{l} L \\triangleq \\ln f (\\hat {d} _ {m 1}, \\dots , \\hat {d} _ {m N}) = - \\frac {N}{2} \\ln (2 \\pi) \\tag {4} \\\\ - \\sum_ {n = 1} ^ {N} \\ln \\sigma_ {m n} - \\sum_ {n = 1} ^ {N} \\frac {(\\hat {d} _ {m n} - d _ {m n}) ^ {2}}{2 \\sigma_ {m n} ^ {2}}. 
\\\\ \\end{array}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.25, + 0.415, + 0.266 + ], + "angle": 0, + "content": "Recall that the CRLB for \\( x_{m} \\) and \\( y_{m} \\) is given by" + }, + { + "type": "equation", + "bbox": [ + 0.093, + 0.27, + 0.49, + 0.3 + ], + "angle": 0, + "content": "\\[\n\\mathcal {E} \\left\\{\\left(\\hat {x} _ {m} - x _ {m}\\right) ^ {2} + \\left(\\hat {y} _ {m} - y _ {m}\\right) ^ {2} \\right\\} \\geq \\frac {1}{J _ {x} ^ {m}} + \\frac {1}{J _ {y} ^ {m}} \\triangleq \\mathrm {C R B} _ {m}, \\tag {5}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.305, + 0.49, + 0.341 + ], + "angle": 0, + "content": "where \\(\\hat{x}_m\\) and \\(\\hat{y}_m\\) denote the estimates of \\(x_m\\) and \\(y_m\\), respectively, \\(J_x^m = \\mathcal{E}\\left\\{-\\frac{\\partial^2L}{\\partial x_m^2}\\right\\}\\) and \\(J_y^m = \\mathcal{E}\\left\\{-\\frac{\\partial^2L}{\\partial y_m^2}\\right\\}\\)." + }, + { + "type": "text", + "bbox": [ + 0.093, + 0.34, + 0.316, + 0.359 + ], + "angle": 0, + "content": "\\(\\frac{\\partial L}{\\partial x_m}\\) can be obtained as follows:" + }, + { + "type": "equation", + "bbox": [ + 0.092, + 0.365, + 0.49, + 0.449 + ], + "angle": 0, + "content": "\\[\n\\begin{array}{l} \\frac {\\partial L}{\\partial x _ {m}} = - \\sum_ {n = 1} ^ {N} \\frac {1}{\\sigma_ {m n}} \\frac {\\partial \\sigma_ {m n}}{\\partial x _ {m}} - \\sum_ {n = 1} ^ {N} \\frac {\\left(d _ {m n} - \\hat {d} _ {m n}\\right)}{\\sigma_ {m n} ^ {2}} \\frac {\\partial d _ {m n}}{\\partial x _ {m}} \\tag {6} \\\\ + \\sum_ {n = 1} ^ {N} \\frac {(\\hat {d} _ {m n} - d _ {m n}) ^ {2}}{\\sigma_ {m n} ^ {3}} \\frac {\\partial \\sigma_ {m n}}{\\partial x _ {m}}. 
\\\\ \\end{array}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.075, + 0.456, + 0.492, + 0.522 + ], + "angle": 0, + "content": "The expression of \\(\\frac{\\partial^2L}{\\partial x_m^2}\\) is quite involved; however, by using the fact that \\(\\mathcal{E}\\{\\hat{d}_{mn} - d_{mn}\\} = 0\\) and following the steps similar to those in [17], the expectation of \\(\\frac{\\partial^2L}{\\partial x_m^2}\\), i.e., \\(J_x^m\\), can be obtained as follows:" + }, + { + "type": "equation", + "bbox": [ + 0.082, + 0.527, + 0.49, + 0.582 + ], + "angle": 0, + "content": "\\[\nJ _ {x} ^ {m} = \\sum_ {n = 1} ^ {N} \\frac {\\left(2 K _ {E} + 1\\right)}{\\sigma_ {m n} ^ {2}} \\frac {\\left(x _ {m} - x _ {n} ^ {\\operatorname {P i n}}\\right) ^ {2}}{\\left(x _ {m} - x _ {n} ^ {\\operatorname {P i n}}\\right) ^ {2} + \\left(y _ {m} - y _ {n} ^ {\\operatorname {P i n}}\\right) ^ {2} + d _ {\\mathrm {H}} ^ {2}}. \\tag {7}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.075, + 0.589, + 0.493, + 0.634 + ], + "angle": 0, + "content": "\\(J_{y}^{m}\\) can be obtained in a similar form, which means that the CRLB for estimating \\(\\mathrm{U}_m\\) 's location can be expressed as follows:" + }, + { + "type": "equation", + "bbox": [ + 0.077, + 0.638, + 0.49, + 0.753 + ], + "angle": 0, + "content": "\\[\n\\begin{array}{l} \\mathrm {C R B} _ {m} = \\frac {K _ {E}}{(2 K _ {E} + 1)} \\left(\\frac {1}{\\sum_ {n = 1} ^ {N} \\frac {\\left(x _ {m} - x _ {n} ^ {\\mathrm {P i n}}\\right) ^ {2}}{\\left(\\left(x _ {m} - x _ {n} ^ {\\mathrm {P i n}}\\right) ^ {2} + \\left(y _ {m} - y _ {n} ^ {\\mathrm {P i n}}\\right) ^ {2} + d _ {\\mathrm {H}} ^ {2}\\right) ^ {2}}} \\right. \\\\ \\left. + \\frac {1}{\\sum_ {n = 1} ^ {N} \\frac {\\left(y _ {m} - y _ {n} ^ {\\operatorname* {P i n}}\\right) ^ {2}}{\\left(\\left(x _ {m} - x _ {n} ^ {\\operatorname* {P i n}}\\right) ^ {2} + \\left(y _ {m} - y _ {n} ^ {\\operatorname* {P i n}}\\right) ^ {2} + d _ {\\mathrm {H}} ^ {2}\\right) ^ {2}}}\\right). 
\\tag {8} \\\\ \\end{array}\n\\]" + }, + { + "type": "title", + "bbox": [ + 0.076, + 0.771, + 0.361, + 0.786 + ], + "angle": 0, + "content": "B. Performance Analysis Based on CRLB" + }, + { + "type": "text", + "bbox": [ + 0.075, + 0.788, + 0.493, + 0.901 + ], + "angle": 0, + "content": "1) Performance Gain over Conventional Antennas: For the conventional-antenna benchmark, consider the use of a circular antenna array with its center located at \\((0,0,0)\\) and its radius being \\(\\frac{\\lambda}{4\\sin\\left(\\frac{\\pi}{N}\\right)}\\), which ensures that the minimal pairwise distance of the antennas is \\(\\frac{\\lambda}{2}\\), where \\(\\lambda\\) denotes the wavelength. By using the fact that the users are uniformly distributed within the service area, the performance gain of pinching antennas over conventional antennas can be evaluated as follows:" + }, + { + "type": "equation", + "bbox": [ + 0.093, + 0.904, + 0.49, + 0.944 + ], + "angle": 0, + "content": "\\[\n\\Delta_ {\\mathrm {C R B}} = \\int_ {- \\frac {D _ {\\mathrm {L}}}{2}} ^ {\\frac {D _ {\\mathrm {L}}}{2}} \\int_ {- \\frac {D _ {\\mathrm {W}}}{2}} ^ {\\frac {D _ {\\mathrm {W}}}{2}} \\left(\\mathrm {C R B} _ {m} - \\mathrm {C R B} _ {m} ^ {\\text {C o n v}}\\right) \\frac {d y _ {m}}{D _ {\\mathrm {W}}} \\frac {d x _ {m}}{D _ {\\mathrm {L}}}, \\tag {9}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.503, + 0.068, + 0.923, + 0.221 + ], + "angle": 0, + "content": "where \\(\\mathrm{CRB}_m^{\\mathrm{Conv}}\\) can be obtained similarly to \\(\\mathrm{CRB}_m\\) by replacing the locations of the pinching antennas with those of the conventional antennas. The performance gain in (9) can be straightforwardly evaluated via computer simulations, but a closed-form expression of \\(\\Delta_{\\mathrm{CRB}}\\) is difficult to obtain due to the fractional expression of the CRLB. 
We note that the performance gain of pinching antennas over conventional antennas can also be illustrated by simply focusing on the user which is located at \\(\\left(\\frac{D_{\\mathrm{L}}}{2},0,0\\right)\\). The use of conventional antennas can achieve the following CRLB:" + }, + { + "type": "equation", + "bbox": [ + 0.527, + 0.228, + 0.921, + 0.469 + ], + "angle": 0, + "content": "\\[\n\\begin{array}{l} \\mathrm {C R B} _ {m} ^ {\\mathrm {C o n v}} = \\left(\\frac {1}{\\sum_ {n = 1} ^ {N} \\frac {\\left(\\frac {D _ {\\mathrm {L}}}{2} - x _ {n} ^ {\\mathrm {C o n v}}\\right) ^ {2}}{\\left(\\left(\\frac {D _ {\\mathrm {L}}}{2} - x _ {n} ^ {\\mathrm {C o n v}}\\right) ^ {2} + (y _ {n} ^ {\\mathrm {C o n v}}) ^ {2} + d _ {\\mathrm {H}} ^ {2}\\right) ^ {2}}} \\right. \\\\ \\left. + \\frac {1}{\\sum_ {n = 1} ^ {N} \\frac {\\left(y _ {n} ^ {\\text {C o n v}}\\right) ^ {2}}{\\left(\\left(\\frac {D _ {\\mathrm {L}}}{2} - x _ {n} ^ {\\text {C o n v}}\\right) ^ {2} + \\left(y _ {n} ^ {\\text {C o n v}}\\right) ^ {2} + d _ {\\mathrm {H}} ^ {2}\\right) ^ {2}}}\\right) \\frac {K _ {E}}{\\left(2 K _ {E} + 1\\right)} \\\\ \\stackrel {(a)} {\\approx} \\frac {K _ {E}}{(2 K _ {E} + 1)} \\left(\\frac {4 \\left(\\frac {D _ {\\mathrm {L}} ^ {2}}{4} + d _ {\\mathrm {H}} ^ {2}\\right) ^ {2}}{N D _ {\\mathrm {L}} ^ {2}} + \\frac {\\left(\\frac {D _ {\\mathrm {L}} ^ {2}}{4} + d _ {\\mathrm {H}} ^ {2}\\right) ^ {2}}{\\sum_ {n = 1} ^ {N} (y _ {n} ^ {\\mathrm {C o n v}}) ^ {2}}\\right) \\\\ \\xrightarrow {(b)} \\infty , \\tag {10} \\\\ \\end{array}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.503, + 0.479, + 0.921, + 0.554 + ], + "angle": 0, + "content": "where step (a) is due to the fact that the conventional antennas are clustered close to the center of the service area, and step (b) is due to the fact that \\( |y_{n}^{\\mathrm{Conv}}| \\to 0 \\) for conventional antennas, particularly for the case with high carrier frequencies (i.e., small wavelengths)." 
+ }, + { + "type": "text", + "bbox": [ + 0.504, + 0.555, + 0.922, + 0.616 + ], + "angle": 0, + "content": "On the other hand, pinching antennas do not suffer the singularity issue experienced by conventional antennas. For example, for the user located at \\(\\left(\\frac{D_{\\mathrm{L}}}{2},0,0\\right)\\), the corresponding CRLB can be expressed as follows:" + }, + { + "type": "equation", + "bbox": [ + 0.512, + 0.624, + 0.921, + 0.784 + ], + "angle": 0, + "content": "\\[\n\\begin{array}{l} \\mathrm {C R B} _ {m} = \\frac {K _ {E}}{\\left(2 K _ {E} + 1\\right)} \\left(\\frac {1}{\\sum_ {n = 1} ^ {N} \\frac {\\left(\\frac {D _ {\\mathrm {L}}}{2} - x _ {n} ^ {\\mathrm {P i n}}\\right) ^ {2}}{\\left(\\left(\\frac {D _ {\\mathrm {L}}}{2} - x _ {n} ^ {\\mathrm {P i n}}\\right) ^ {2} + (y _ {n} ^ {\\mathrm {P i n}}) ^ {2} + d _ {\\mathrm {H}} ^ {2}\\right) ^ {2}}} \\right. \\\\ \\left. + \\frac {1}{\\sum_ {n = 1} ^ {N} \\frac {\\left(y _ {n} ^ {\\mathrm {P i n}}\\right) ^ {2}}{\\left(\\left(\\frac {D _ {\\mathrm {L}}}{2} - x _ {n} ^ {\\mathrm {P i n}}\\right) ^ {2} + \\left(y _ {n} ^ {\\mathrm {P i n}}\\right) ^ {2} + d _ {\\mathrm {H}} ^ {2}\\right) ^ {2}}}\\right). \\tag {11} \\\\ \\end{array}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.504, + 0.79, + 0.921, + 0.822 + ], + "angle": 0, + "content": "For illustrative purposes, a simple upper bound on the CRLB achieved by pinching antennas can be obtained as follows:" + }, + { + "type": "equation", + "bbox": [ + 0.512, + 0.829, + 0.921, + 0.942 + ], + "angle": 0, + "content": "\\[\n\\begin{array}{l} \\mathrm {C R B} _ {m} \\leq \\frac {K _ {E}}{\\left(2 K _ {E} + 1\\right)} \\left(\\frac {\\left(\\left(\\frac {D _ {\\mathrm {L}}}{2} - x _ {n} ^ {\\operatorname {P i n}}\\right) ^ {2} + \\left(y _ {n} ^ {\\operatorname {P i n}}\\right) ^ {2} + d _ {\\mathrm {H}} ^ {2}\\right) ^ {2}}{\\left(\\frac {D _ {\\mathrm {L}}}{2} - x _ {n} ^ {\\operatorname {P i n}}\\right) ^ {2}} \\right. \\\\ \\left. 
+ \\frac {\\left(\\left(\\frac {D _ {\\mathrm {L}}}{2} - x _ {n} ^ {\\text {P i n}}\\right) ^ {2} + \\left(y _ {n} ^ {\\text {P i n}}\\right) ^ {2} + d _ {\\mathrm {H}} ^ {2}\\right) ^ {2}}{\\left(y _ {n} ^ {\\text {P i n}}\\right) ^ {2}}\\right), \\tag {12} \\\\ \\end{array}\n\\]" + } + ], + [ + { + "type": "page_number", + "bbox": [ + 0.912, + 0.031, + 0.921, + 0.041 + ], + "angle": 0, + "content": "3" + }, + { + "type": "text", + "bbox": [ + 0.074, + 0.069, + 0.493, + 0.146 + ], + "angle": 0, + "content": "where \\( n \\) is an arbitrary integer between 1 and \\( N \\). Because of the diverse locations of the \\( N \\) pinching antennas, it is always possible to find \\( n \\in \\{1, \\dots, N\\} \\) which yields a finite value for the upper bound shown in (12), i.e., the CRLB achieved by pinching antennas is always bounded." + }, + { + "type": "text", + "bbox": [ + 0.074, + 0.146, + 0.493, + 0.206 + ], + "angle": 0, + "content": "Remark 1: Unlike conventional antennas which can cause noticeable accuracy variations between users, the carried-out case study shows that pinching antennas have the ability to offer uniform positioning accuracy between the users." + }, + { + "type": "text", + "bbox": [ + 0.074, + 0.206, + 0.493, + 0.373 + ], + "angle": 0, + "content": "2) Flexible User-Centric Positioning: Due to their low-cost and reconfigurability features, the locations of pinching antennas can be tailored to a serving user for realizing flexible user-centric positioning. To facilitate the performance analysis, the association between the pinching antennas and the waveguides is required. Without loss of generality, assume that there are \\(\\tilde{N} = \\frac{N}{N_{\\mathrm{WG}}}\\) pinching antennas on each waveguide. Denote the location of the \\(n\\) -th antenna on the \\(i\\) -th waveguide by \\(\\psi_{in}^{\\mathrm{Pin}} = (x_{in}^{\\mathrm{Pin}},y_{in}^{\\mathrm{Pin}},d_{\\mathrm{H}})\\). 
Furthermore, assume that the antennas are equally spaced, and define \\(\\Delta_x = |x_{in}^{\\mathrm{Pin}} - x_{im}^{\\mathrm{Pin}}|\\) and \\(\\Delta_y = |y_{in}^{\\mathrm{Pin}} - y_{jn}^{\\mathrm{Pin}}|\\), \\(m\\neq n\\) and \\(i\\neq j\\)." + }, + { + "type": "text", + "bbox": [ + 0.075, + 0.372, + 0.493, + 0.477 + ], + "angle": 0, + "content": "For illustrative purposes, assume that all \\(N\\) pinching antennas are activated in a square area with \\(\\mathrm{U}_m\\) at its center, where \\(\\tilde{N} = N_{\\mathrm{WG}}\\) and \\(\\Delta_x = \\Delta_y = \\Delta\\). This assumption is made to facilitate the performance analysis, and more practical setups will be considered in the simulation section. Define \\(\\bar{N} = \\frac{\\tilde{N}}{2}\\), and without loss of generality, assume that \\(\\bar{N}\\) is an even number." + }, + { + "type": "text", + "bbox": [ + 0.075, + 0.479, + 0.492, + 0.509 + ], + "angle": 0, + "content": "With these assumptions, the CRLB in (8) can be simplified as follows:" + }, + { + "type": "equation", + "bbox": [ + 0.096, + 0.516, + 0.473, + 0.568 + ], + "angle": 0, + "content": "\\[\n\\mathrm {C R B} _ {m} = \\frac {\\frac {K _ {E} \\Delta^ {2}}{4 (2 K _ {E} + 1)}}{\\sum_ {i = 1} ^ {\\bar {N}} \\sum_ {n = 1} ^ {\\bar {N}} \\frac {(n - \\frac {1}{2}) ^ {2}}{\\beta_ {n i} ^ {2}}} + \\frac {\\frac {K _ {E} \\Delta^ {2}}{4 (2 K _ {E} + 1)}}{\\sum_ {i = 1} ^ {\\bar {N}} \\sum_ {n = 1} ^ {\\bar {N}} \\frac {(i - \\frac {1}{2}) ^ {2}}{\\beta_ {n i} ^ {2}}},\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.074, + 0.578, + 0.493, + 0.764 + ], + "angle": 0, + "content": "where \\( \\beta_{ni} = \\left(n - \\frac{1}{2}\\right)^{2} + \\left(i - \\frac{1}{2}\\right)^{2} + \\frac{d_{\\mathrm{H}}^{2}}{\\Delta^{2}} \\). The above CRLB can be used to design the antenna placement, i.e., the optimal choice of \\( \\Delta \\) for minimizing the CRLB. 
Computer simulations can be used to verify that \\( \\frac{\\partial^2\\mathrm{CRB}_m}{\\partial\\Delta^2} > 0 \\), i.e., \\( \\mathrm{CRB}_m \\) is a convex function of \\( \\Delta \\), and hence convex optimization solvers can be used to find the optimal solution of \\( \\Delta \\) efficiently. To obtain an insightful understanding of the optimal choice of \\( \\Delta \\), a special case with \\( N = 4 \\) is focused on in the following. We note that this special case is important in practice, given the fact that using a small number of antennas is helpful in reducing system overhead. For the case with \\( N = 4 \\), the CRLB can be simplified as follows:" + }, + { + "type": "equation", + "bbox": [ + 0.157, + 0.773, + 0.493, + 0.81 + ], + "angle": 0, + "content": "\\[\n\\mathrm {C R B} _ {m} = \\frac {2 K _ {E} \\Delta^ {2}}{\\left(2 K _ {E} + 1\\right)} \\left(\\frac {1}{2} + \\frac {d _ {\\mathrm {H}} ^ {2}}{\\Delta^ {2}}\\right) ^ {2}, \\tag {13}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.075, + 0.817, + 0.345, + 0.833 + ], + "angle": 0, + "content": "whose first-order derivative is given by" + }, + { + "type": "equation", + "bbox": [ + 0.095, + 0.842, + 0.493, + 0.877 + ], + "angle": 0, + "content": "\\[\n\\frac {\\partial \\mathrm {C R B} _ {m}}{\\partial \\Delta} = \\frac {4 K _ {E}}{(2 K _ {E} + 1)} \\left(\\frac {1}{2} \\Delta + \\frac {d _ {\\mathrm {H}} ^ {2}}{\\Delta}\\right) \\left(\\frac {1}{2} - \\frac {d _ {\\mathrm {H}} ^ {2}}{\\Delta^ {2}}\\right). 
\\tag {14}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.075, + 0.884, + 0.421, + 0.9 + ], + "angle": 0, + "content": "The second-order derivative of \\(\\mathrm{CRB}_m\\) is given by" + }, + { + "type": "equation", + "bbox": [ + 0.137, + 0.908, + 0.493, + 0.943 + ], + "angle": 0, + "content": "\\[\n\\frac {\\partial^ {2} \\mathrm {C R B} _ {m}}{\\partial \\Delta^ {2}} = \\frac {4 K _ {E}}{\\left(2 K _ {E} + 1\\right)} \\left(\\frac {1}{4} + 3 \\frac {d _ {\\mathrm {H}} ^ {4}}{\\Delta^ {4}}\\right) > 0, \\tag {15}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.504, + 0.069, + 0.923, + 0.115 + ], + "angle": 0, + "content": "which means that \\(\\mathrm{CRB}_m\\) is a convex function of \\(\\Delta\\). Therefore, the optimal solution of \\(\\Delta\\) for minimizing the CRLB for the special case with \\(N = 4\\) is given by" + }, + { + "type": "equation", + "bbox": [ + 0.666, + 0.125, + 0.922, + 0.143 + ], + "angle": 0, + "content": "\\[\n\\Delta^ {*} = \\sqrt {2} d _ {H}. \\tag {16}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.504, + 0.154, + 0.922, + 0.229 + ], + "angle": 0, + "content": "Remark 2: An intuition is that the CRLB is minimized if all the antennas are placed as close to the user as possible, i.e., \\(\\Delta^{*} \\to 0\\) (or \\(\\frac{\\lambda}{2}\\) to avoid antenna coupling). (16) shows that this intuition is wrong, where the optimal antenna spacing is a function of the height of the waveguides." + }, + { + "type": "text", + "bbox": [ + 0.504, + 0.229, + 0.922, + 0.31 + ], + "angle": 0, + "content": "3) Local-Maximum Property of CRLB: In the proximity of each pinching antenna, \\(\\psi_{n}^{\\mathrm{Pin}}\\), there exists a local maximum of \\(\\mathrm{CRB}_m\\) shown in (8). This local-maximum property can be revealed by studying \\(\\frac{\\partial\\mathrm{CRB}_m}{\\partial x_m}\\) and \\(\\frac{\\partial\\mathrm{CRB}_m}{\\partial y_m}\\). 
Without loss of generality, \\(\\frac{\\partial\\mathrm{CRB}_m}{\\partial x_m}\\) is focused, and can be expressed as follows:" + }, + { + "type": "equation", + "bbox": [ + 0.526, + 0.318, + 0.922, + 0.353 + ], + "angle": 0, + "content": "\\[\n\\frac {\\partial \\mathrm {C R B} _ {m}}{\\partial x _ {m}} = \\frac {K _ {E}}{(2 K _ {E} + 1)} \\left(- \\frac {1}{\\gamma_ {1} ^ {2}} [ \\gamma_ {2} - \\gamma_ {3} ] + \\frac {1}{\\gamma_ {4} ^ {2}} \\gamma_ {5}\\right), \\tag {17}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.504, + 0.362, + 0.922, + 0.453 + ], + "angle": 0, + "content": "where \\(d_{mn}^2 = \\left(x_m - x_n^{\\mathrm{Pin}}\\right)^2 +\\left(y_m - y_n^{\\mathrm{Pin}}\\right)^2 +d_{\\mathrm{H}}^2,\\) \n\\(\\gamma_{1} = \\sum_{n = 1}^{N}\\frac{\\left(x_{m} - x_{n}^{\\mathrm{Pin}}\\right)^{2}}{d_{mn}^{4}},\\gamma_{2} = \\sum_{n = 1}^{N}\\frac{2\\left(x_{m} - x_{n}^{\\mathrm{Pin}}\\right)}{d_{mn}^{4}},\\gamma_{3} =\\) \n\\(\\begin{array}{r}\\sum_{n = 1}^{N}\\frac{4\\left(x_m - x_n^{\\mathrm{Pin}}\\right)^3}{d_{mn}^6},\\gamma_4 = \\sum_{n = 1}^{N}\\frac{\\left(y_m - y_n^{\\mathrm{Pin}}\\right)^2}{d_{mn}^2},\\mathrm{and}\\gamma_5 = \\end{array}\\) \n\\(\\begin{array}{r}\\sum_{n = 1}^{N}\\frac{4\\left(x_m - x_n^{\\mathrm{Pin}}\\right)\\left(y_m - y_n^{\\mathrm{Pin}}\\right)^2}{d_{mn}^6}. \\end{array}\\)" + }, + { + "type": "text", + "bbox": [ + 0.504, + 0.453, + 0.922, + 0.526 + ], + "angle": 0, + "content": "Without loss of generality, assume that \\(\\mathrm{U}_m\\) is in the proximity of the first pinching antenna on the first waveguide, i.e., \\(x_{m} = x_{11}^{\\mathrm{Pin}} + \\delta_{x}\\) and \\(y_{m} = y_{11}^{\\mathrm{Pin}} + \\delta_{y}\\), where \\(\\delta_x\\to 0\\) and \\(\\delta_y\\rightarrow 0\\). 
In this case, \\(\\gamma_{1}\\) in (17) can be approximated as follows:" + }, + { + "type": "equation", + "bbox": [ + 0.561, + 0.535, + 0.922, + 0.582 + ], + "angle": 0, + "content": "\\[\n\\gamma_ {1} \\approx \\sum_ {i = 1} ^ {\\frac {N}{N}} \\sum_ {n = 1} ^ {\\tilde {N} - 1} \\frac {n ^ {2} \\Delta_ {x} ^ {2}}{\\left(n ^ {2} \\Delta_ {x} ^ {2} + (i - 1) ^ {2} \\Delta_ {y} ^ {2} + d _ {\\mathrm {H}} ^ {2}\\right) ^ {2}}, \\tag {18}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.504, + 0.592, + 0.921, + 0.624 + ], + "angle": 0, + "content": "where the terms at the order of \\(\\delta_x^2\\) are omitted. Similarly, by omitting the terms of \\(\\delta_x^2\\), \\(\\gamma_2\\) can be approximated as follows:" + }, + { + "type": "equation", + "bbox": [ + 0.553, + 0.633, + 0.921, + 0.729 + ], + "angle": 0, + "content": "\\[\n\\begin{array}{l} \\gamma_ {2} \\approx \\sum_ {i = 1} ^ {\\frac {N}{N}} \\frac {2 \\delta_ {x}}{\\left(\\delta^ {2} + (i - 1) ^ {2} \\Delta_ {y} ^ {2} + d _ {\\mathrm {H}} ^ {2}\\right) ^ {2}} \\tag {19} \\\\ - \\sum_ {i = 1} ^ {\\frac {N}{N}} \\sum_ {n = 1} ^ {\\tilde {N} - 1} \\frac {2 n \\Delta_ {x}}{\\left(n ^ {2} \\Delta_ {x} ^ {2} + (i - 1) ^ {2} \\Delta_ {y} ^ {2} + d _ {\\mathrm {H}} ^ {2}\\right) ^ {2}}. 
\\\\ \\end{array}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.505, + 0.737, + 0.895, + 0.753 + ], + "angle": 0, + "content": "Similarly, \\(\\gamma_3, \\gamma_4\\) and \\(\\gamma_5\\) can be approximated as follows:" + }, + { + "type": "equation", + "bbox": [ + 0.54, + 0.762, + 0.921, + 0.809 + ], + "angle": 0, + "content": "\\[\n\\gamma_ {3} \\approx - \\sum_ {i = 1} ^ {\\frac {N}{N}} \\sum_ {n = 1} ^ {\\tilde {N} - 1} \\frac {4 n ^ {3} \\Delta_ {x} ^ {3}}{\\left(n ^ {2} \\Delta_ {x} ^ {2} + (i - 1) ^ {2} \\Delta_ {y} ^ {2} + d _ {\\mathrm {H}} ^ {2}\\right) ^ {3}}, \\tag {20}\n\\]" + }, + { + "type": "equation", + "bbox": [ + 0.54, + 0.811, + 0.921, + 0.858 + ], + "angle": 0, + "content": "\\[\n\\gamma_ {4} \\approx \\sum_ {i = 1} ^ {\\frac {N}{N} - 1} \\sum_ {n = 1} ^ {\\tilde {N}} \\frac {i ^ {2} \\Delta_ {y} ^ {2}}{\\left((n - 1) ^ {2} \\Delta_ {x} ^ {2} + i ^ {2} \\Delta_ {y} ^ {2} + d _ {\\mathrm {H}} ^ {2}\\right) ^ {2}}, \\tag {21}\n\\]" + }, + { + "type": "equation", + "bbox": [ + 0.54, + 0.86, + 0.921, + 0.906 + ], + "angle": 0, + "content": "\\[\n\\gamma_ {5} \\approx - \\sum_ {i = 1} ^ {\\frac {N}{N} - 1} \\sum_ {n = 1} ^ {\\tilde {N} - 1} \\frac {n \\Delta_ {x} i ^ {2} \\Delta_ {y} ^ {2}}{\\left(n ^ {2} \\Delta_ {x} ^ {2} + i \\Delta_ {y} ^ {2} + d _ {\\mathrm {H}} ^ {2}\\right) ^ {3}}. 
\\tag {22}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.504, + 0.915, + 0.922, + 0.949 + ], + "angle": 0, + "content": "To facilitate the analysis of this local-maximum property of CRLB, assume that \\(\\Delta_x = \\Delta_y = \\Delta \\gg d_{\\mathrm{H}}\\) and \\(\\tilde{N} = \\frac{N}{\\tilde{N}}\\), which" + } + ], + [ + { + "type": "page_number", + "bbox": [ + 0.912, + 0.032, + 0.921, + 0.041 + ], + "angle": 0, + "content": "4" + }, + { + "type": "text", + "bbox": [ + 0.074, + 0.07, + 0.493, + 0.099 + ], + "angle": 0, + "content": "means that \\(\\gamma_{1} = \\gamma_{3}\\), and hence the CRLB can be simplified as follows:" + }, + { + "type": "equation", + "bbox": [ + 0.085, + 0.102, + 0.482, + 0.203 + ], + "angle": 0, + "content": "\\[\n\\begin{array}{l} \\frac {\\partial \\mathrm {C R B} _ {m}}{\\partial x _ {m}} \\approx \\frac {\\gamma_ {1} K _ {E}}{(2 K _ {E} + 1)} \\left[ - \\sum_ {i = 1} ^ {\\frac {N}{N}} \\frac {2 \\delta_ {x}}{(\\delta^ {2} + (i - 1) ^ {2} \\Delta^ {2} + d _ {\\mathrm {H}} ^ {2}) ^ {2}} \\right. \\\\ \\left. 
+ \\sum_ {i = 1} ^ {\\frac {N}{N}} \\sum_ {n = 1} ^ {\\tilde {N} - 1} \\frac {2 n \\Delta}{\\bar {\\beta} _ {n i} ^ {2}} - \\sum_ {i = 1} ^ {\\frac {N}{N}} \\sum_ {n = 1} ^ {\\tilde {N} - 1} \\frac {4 n ^ {3} \\Delta^ {3}}{\\bar {\\beta} _ {n i} ^ {3}} - \\sum_ {i = 1} ^ {\\frac {N}{N} - 1} \\sum_ {n = 1} ^ {\\tilde {N} - 1} \\frac {4 n ^ {3} \\Delta^ {3}}{\\bar {\\beta} _ {n i} ^ {3}} \\right], \\\\ \\end{array}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.075, + 0.205, + 0.339, + 0.223 + ], + "angle": 0, + "content": "where \\(\\bar{\\beta}_{ni} = (n^2 + (i - 1)^2)\\Delta^2 + d_{\\mathrm{H}}^2\\)" + }, + { + "type": "text", + "bbox": [ + 0.074, + 0.222, + 0.492, + 0.275 + ], + "angle": 0, + "content": "Note that if \\( i = \\frac{N}{N} \\), \\( \\sum_{n=1}^{\\tilde{N}-1} \\frac{4n^3\\Delta^3}{\\left((n^2+(i-1)^2)\\Delta^2+d_{\\mathrm{H}}^2\\right)^3} \\) is an insignificant term, which means that the CRLB can be further simplified as follows:" + }, + { + "type": "equation", + "bbox": [ + 0.085, + 0.279, + 0.491, + 0.381 + ], + "angle": 0, + "content": "\\[\n\\begin{array}{l} \\frac {\\partial \\mathrm {C R B} _ {m}}{\\partial x _ {m}} \\approx \\frac {\\gamma_ {1} K _ {E}}{(2 K _ {E} + 1)} \\left[ - \\sum_ {i = 1} ^ {\\frac {N}{N}} \\frac {2 \\delta_ {x}}{(\\delta^ {2} + (i - 1) ^ {2} \\Delta^ {2} + d _ {\\mathrm {H}} ^ {2}) ^ {2}} \\right. \\\\ \\left. + 2 \\Delta \\sum_ {i = 1} ^ {\\frac {N}{N}} \\sum_ {n = 1} ^ {\\tilde {N} - 1} \\frac {n \\left(\\left((i - 1) ^ {2} - 3 n ^ {2}\\right) \\Delta^ {2} + d _ {\\mathrm {H}} ^ {2}\\right)}{\\left(\\left(n ^ {2} + (i - 1) ^ {2}\\right) \\Delta^ {2} + d _ {\\mathrm {H}} ^ {2}\\right) ^ {3}} \\right]. 
\\tag {23} \\\\ \\end{array}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.074, + 0.383, + 0.492, + 0.442 + ], + "angle": 0, + "content": "For the case with \\(\\delta_x = 0\\), i.e., the user is located right underneath of the pinching antenna at \\(\\psi_{11}^{\\mathrm{Pin}}\\), by using the assumption that \\(\\Delta \\gg d\\), the CRLB can be expressed as follows:" + }, + { + "type": "equation", + "bbox": [ + 0.18, + 0.443, + 0.49, + 0.477 + ], + "angle": 0, + "content": "\\[\n\\frac {\\partial \\mathrm {C R B} _ {m}}{\\partial x _ {m}} \\approx \\frac {\\gamma_ {1} K _ {E}}{(2 K _ {E} + 1)} \\frac {2}{\\Delta^ {3}} \\gamma_ {6}, \\tag {24}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.075, + 0.481, + 0.493, + 0.593 + ], + "angle": 0, + "content": "where \\(\\gamma_6 = \\sum_{i=1}^{N} \\sum_{n=1}^{\\tilde{N}-1} \\frac{(i-1)^2 - 3n^2}{(n^2 + (i-1)^2)^3}\\). We note that the terms of \\(\\gamma_6\\) decay rapidly by increasing \\(n\\) and \\(i\\), i.e., \\(\\gamma_6\\) can be approximated by keeping the dominant negative term (\\(n = 1\\) and \\(i = 1\\)) and the dominant positive term (\\(n = 1\\) and \\(i = 3\\)), i.e., \\(\\gamma_6 \\approx -3 + \\frac{1}{125}\\), which means \\(\\frac{\\partial \\mathrm{CRB}_m}{\\partial x_m} \\leq 0\\) for the case with \\(\\delta_x = 0\\). 
For the case of \\(\\delta_x \\neq 0\\), the CRLB can be approximated as follows:" + }, + { + "type": "equation", + "bbox": [ + 0.139, + 0.596, + 0.427, + 0.631 + ], + "angle": 0, + "content": "\\[\n\\frac {\\partial \\mathrm {C R B} _ {m}}{\\partial x _ {m}} \\approx \\frac {\\gamma_ {1} K _ {E}}{(2 K _ {E} + 1)} \\left[ - \\frac {2 \\delta_ {x}}{d _ {\\mathrm {H}} ^ {4}} + \\frac {2}{\\Delta^ {3}} \\gamma_ {6} \\right].\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.074, + 0.635, + 0.493, + 0.734 + ], + "angle": 0, + "content": "Due to the assumption of \\(\\Delta \\gg d_{\\mathrm{H}}\\) , the term \\(\\frac{2\\delta_x}{d_{\\mathrm{H}}^4}\\) is dominant, and hence \\(\\frac{\\partial\\mathrm{CRB}_m}{\\partial x_m} >0\\) if \\(\\delta_{x} < 0\\) . In summary, \\(\\frac{\\partial\\mathrm{CRB}_m}{\\partial x_m} < 0\\) if the user's location is \\((x_{11}^{\\mathrm{Pin}},y_{11}^{\\mathrm{Pin}},0)\\) , and \\(\\frac{\\partial\\mathrm{CRB}_m}{\\partial x_m} >0\\) if the user's location is \\((x_{11}^{\\mathrm{Pin}} + \\delta_x,y_{11}^{\\mathrm{Pin}} + \\delta_y,d_{\\mathrm{H}})\\) . A similar conclusion can be established to \\(\\frac{\\partial\\mathrm{CRB}_m}{\\partial y_m}\\) , which means that there exists a local maximum for the CRLB around \\(\\psi_{n}^{\\mathrm{Pin}}\\)" + }, + { + "type": "text", + "bbox": [ + 0.074, + 0.735, + 0.493, + 0.855 + ], + "angle": 0, + "content": "Remark 3: The local maximum property of the CRLB indicates an interesting conflict between the communication and sensing functionalities of pinching antennas. In particular, placing a pinching antenna directly above the user might increase the user's data rate but also degrade positioning accuracy. In other words, this local maximum property reveals the importance of antenna placement in pinching-antenna assisted ISAC networks." + }, + { + "type": "title", + "bbox": [ + 0.19, + 0.866, + 0.377, + 0.88 + ], + "angle": 0, + "content": "IV. 
NUMERICAL STUDIES" + }, + { + "type": "text", + "bbox": [ + 0.075, + 0.885, + 0.492, + 0.945 + ], + "angle": 0, + "content": "In this section, computer simulation results are presented to demonstrate the impact of pinching antennas on the positioning accuracy, where \\( K_{E} = 0.01 \\), \\( D_{\\mathrm{W}} = 10 \\mathrm{~m} \\) and \\( D_{\\mathrm{L}} = 40 \\mathrm{~m} \\), unless stated otherwise." + }, + { + "type": "image", + "bbox": [ + 0.574, + 0.066, + 0.857, + 0.235 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.504, + 0.239, + 0.924, + 0.312 + ], + "angle": 0, + "content": "Fig. 1. Averaged CRLBs, \\(\\mathrm{CRB}_m\\), achieved by the considered antenna systems, where \\(N_{\\mathrm{WG}} = 2\\) and \\(d = 3\\mathrm{m}\\). For the pinching-antenna system, on each waveguide, there are \\(\\frac{N}{N_{\\mathrm{WG}}}\\) antennas, which are equally spaced. Due to the singularity issue experienced by conventional antennas discussed in Section III-B1, users are assumed to be excluded from a square area with its side being \\(a\\) and its center at the origin." + }, + { + "type": "image", + "bbox": [ + 0.571, + 0.323, + 0.841, + 0.469 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.641, + 0.479, + 0.788, + 0.492 + ], + "angle": 0, + "content": "(a) Conventional Antennas" + }, + { + "type": "image", + "bbox": [ + 0.574, + 0.508, + 0.838, + 0.653 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.649, + 0.663, + 0.774, + 0.676 + ], + "angle": 0, + "content": "(b) Pinching Antennas" + }, + { + "type": "image_caption", + "bbox": [ + 0.504, + 0.675, + 0.921, + 0.713 + ], + "angle": 0, + "content": "Fig. 2. CRLBs achieved by the considered antenna systems. \\( N = 20 \\), \\( N_{\\mathrm{WG}} = 2 \\) and \\( d = 3 \\, \\mathrm{m} \\). On each waveguide, there are \\( \\frac{N}{N_{\\mathrm{WG}}} \\) antennas, which are equally spaced." 
+ }, + { + "type": "text", + "bbox": [ + 0.503, + 0.717, + 0.922, + 0.867 + ], + "angle": 0, + "content": "In Fig. 1, the averaged CRLBs achieved by the conventional and pinching-antenna systems are shown as functions of the number of antennas, where \\( \\mathrm{U}_m \\) is assumed to be uniformly deployed in the service area. Because the conventional-antenna system suffers the singularity issue discussed in Section III-B1, it is assumed that \\( \\mathrm{U}_m \\) cannot be located in a square area with its side being \\( a \\) and its center at the origin. As can be seen from Fig. 1, the use of pinching antennas yields a significant performance gain over conventional antennas, regardless of the choices of \\( N \\) and \\( a \\)." + }, + { + "type": "text", + "bbox": [ + 0.503, + 0.87, + 0.922, + 0.946 + ], + "angle": 0, + "content": "Fig. 2 is provided to highlight the fact that a user's positioning accuracy depends on its location. On the one hand, Fig. 2(a) shows that for conventional antennas, a user can experience extremely poor positioning accuracy if it is located far away from the center of the service area, which" + } + ], + [ + { + "type": "page_number", + "bbox": [ + 0.912, + 0.031, + 0.921, + 0.041 + ], + "angle": 0, + "content": "5" + }, + { + "type": "image", + "bbox": [ + 0.147, + 0.053, + 0.413, + 0.197 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.145, + 0.206, + 0.42, + 0.226 + ], + "angle": 0, + "content": "(a) Positioning with a focal point at \\(\\left(-\\frac{D_{\\mathrm{L}}}{4},0,0\\right)\\)" + }, + { + "type": "image", + "bbox": [ + 0.144, + 0.241, + 0.408, + 0.386 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.149, + 0.394, + 0.412, + 0.414 + ], + "angle": 0, + "content": "(b) Positioning with a focal point at \\(\\left(\\frac{D_{\\mathrm{L}}}{4},0,0\\right)\\)" + }, + { + "type": "image_caption", + "bbox": [ + 0.074, + 0.414, + 0.492, + 0.465 + ], + 
"angle": 0, + "content": "Fig. 3. Using pinching antennas to achieve flexible user-centric positioning. \\( N = 20 \\), \\( N_{\\mathrm{WG}} = 2 \\) and \\( d = 3 \\mathrm{~m} \\). On each waveguide, there are \\( \\frac{N}{N_{\\mathrm{WG}}} \\) antennas, which are equally spaced in a segment with its length being \\( \\frac{D_{\\mathrm{L}}}{2} \\) and its center at the focal points shown in the figures." + }, + { + "type": "text", + "bbox": [ + 0.074, + 0.466, + 0.491, + 0.601 + ], + "angle": 0, + "content": "confirms the analytical results shown in (10). On the other hand, Fig. 2(b) shows that the use of pinching antennas ensures reasonably accurate positioning, regardless of whether the user is at the center or the edge of the service area. This also means that for the multi-user scenario, using pinching antennas can ensure fairness for the users' positioning accuracy. We note that in Fig. 2(b), local maximums are clearly visible in the proximity of the pinching antennas, which confirms the analysis shown in Section III-B3." + }, + { + "type": "text", + "bbox": [ + 0.074, + 0.602, + 0.493, + 0.797 + ], + "angle": 0, + "content": "Recall that one key feature of pinching antennas is their reconfiguration capabilities, where the number and the locations of the antennas can be changed in a flexible manner. Fig. 3 demonstrates how this reconfiguration feature can be used to achieve flexible user-centric positioning. In particular, Figs. 3(a) and 3(b) show that by activating the pinching antennas close to the intended user locations, different focal points can be realized, which means that users close to these focal points can enjoy high positioning accuracy. For the case where the pinching antennas are clustered close to a user, Fig. 4 is provided to show the impact of the antenna spacing on the CRLB, where the accuracy of the analytical results developed in (16) is also verified." 
+ }, + { + "type": "title", + "bbox": [ + 0.218, + 0.805, + 0.348, + 0.819 + ], + "angle": 0, + "content": "V. CONCLUSIONS" + }, + { + "type": "text", + "bbox": [ + 0.074, + 0.824, + 0.492, + 0.947 + ], + "angle": 0, + "content": "This letter investigated how the key features of pinching antennas can be used to support ISAC from the CRLB perspective. In particular, the CRLB achieved by pinching antennas was first derived and then compared to that of conventional antennas. The presented analytical and simulation results demonstrated that the use of pinching antennas can significantly reduce CRLB and, hence, enhance the sensing capability. In addition, this letter showed that the low-cost and" + }, + { + "type": "image", + "bbox": [ + 0.574, + 0.041, + 0.858, + 0.212 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.504, + 0.216, + 0.922, + 0.263 + ], + "angle": 0, + "content": "Fig. 4. Impact of the antenna spacing on the CRLB. \\( N = 4 \\) pinching antennas are activated in a square-shape area with the antenna spacing being \\( \\Delta \\) and \\( \\mathrm{U}_m \\) located at the center of the area, where \\( N_{\\mathrm{WG}} = 2 \\). The analytical results are based on (16)." + }, + { + "type": "text", + "bbox": [ + 0.504, + 0.269, + 0.922, + 0.3 + ], + "angle": 0, + "content": "reconfigurability features of pinching antennas can be utilized to realize flexible user-centric positioning." + }, + { + "type": "title", + "bbox": [ + 0.665, + 0.307, + 0.762, + 0.319 + ], + "angle": 0, + "content": "REFERENCES" + }, + { + "type": "ref_text", + "bbox": [ + 0.515, + 0.327, + 0.922, + 0.361 + ], + "angle": 0, + "content": "[1] A. Fukuda, H. Yamamoto, H. Okazaki, Y. Suzuki, and K. Kawai, \"Pinching antenna - using a dielectric waveguide as an antenna,\" NTT DOCOMO Technical J., vol. 23, no. 3, pp. 5-12, Jan. 2022." + }, + { + "type": "ref_text", + "bbox": [ + 0.515, + 0.361, + 0.921, + 0.394 + ], + "angle": 0, + "content": "[2] Z. 
Ding, R. Schober, and H. V. Poor, \"Flexible-antenna systems: A pinching-antenna perspective,\" IEEE Trans. Commun., (to appear in 2025) Available on-line at arXiv:2412.02376." + }, + { + "type": "ref_text", + "bbox": [ + 0.514, + 0.395, + 0.921, + 0.428 + ], + "angle": 0, + "content": "[3] Z. Ding and H. V. Poor, “Los blockage in pinching-antenna systems: Curse or blessing?” IEEE Wireless Commun. Lett., (submitted) Available on-line at arXiv:2503.08554." + }, + { + "type": "ref_text", + "bbox": [ + 0.514, + 0.429, + 0.921, + 0.463 + ], + "angle": 0, + "content": "[4] K. Wang, Z. Ding, and R. Schober, \"Antenna activation for NOMA assisted pinching-antenna systems,\" IEEE Commun. Lett., (to appear in 2025) Available on-line at arXiv:2412.13969." + }, + { + "type": "ref_text", + "bbox": [ + 0.514, + 0.463, + 0.921, + 0.496 + ], + "angle": 0, + "content": "[5] C. Ouyang, Z. Wang, Y. Liu, and Z. Ding, \"Array gain for pinching-antenna systems (PASS),\" IEEE Commun. Lett., (submitted) Available on-line at arXiv:2501.05657." + }, + { + "type": "ref_text", + "bbox": [ + 0.514, + 0.496, + 0.921, + 0.531 + ], + "angle": 0, + "content": "[6] Z. Wang, C. Ouyang, X. Mu, Y. Liu, and Z. Ding, \"Modeling and beamforming optimization for pinching-antenna systems,\" IEEE Trans. Wireless Commun., (submitted) Available on-line at arXiv:2502.05917." + }, + { + "type": "ref_text", + "bbox": [ + 0.514, + 0.531, + 0.921, + 0.564 + ], + "angle": 0, + "content": "[7] Y. Xu, Z. Ding, and G. Karagiannidis, \"Rate maximization for downlink pinching-antenna systems,\" IEEE Commun. Lett., (to appear in 2025) Available on-line at arXiv:2502.12629." + }, + { + "type": "ref_text", + "bbox": [ + 0.514, + 0.565, + 0.921, + 0.599 + ], + "angle": 0, + "content": "[8] X. Mu, G. Zhu, and Y. Liu, \"Pinching-antenna system (PASS)-enabled multicast communications,\" IEEE Trans. Commun., (submitted) Available on-line at arXiv:2502.16624." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.514, + 0.599, + 0.921, + 0.632 + ], + "angle": 0, + "content": "[9] J. Xiao, J. Wang, and Y. Liu, \"Channel estimation for pinching-antenna systems (PASS),\" IEEE Trans. Commun., (submitted) Available on-line at arXiv:2503.13268." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.633, + 0.921, + 0.666 + ], + "angle": 0, + "content": "[10] ——, “Beam training for pinching-antenna systems (PASS),” IEEE Trans. Wireless Commun., (submitted) Available on-line at arXiv:2502.05921." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.666, + 0.921, + 0.699 + ], + "angle": 0, + "content": "[11] X. Xie, Y. Lu, and Z. Ding, \"Graph neural network enabled pinching antennas,\" IEEE Commun. Lett., (submitted) Available on-line at arXiv:2502.05447." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.7, + 0.921, + 0.734 + ], + "angle": 0, + "content": "[12] J. Guo, Y. Liu, and A. Nallanathan, \"GPASS: Deep learning for beamforming in pinching-antenna systems (PASS),\" IEEE Commun. Lett., (submitted) Available on-line at arXiv:2502.01438." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.735, + 0.921, + 0.779 + ], + "angle": 0, + "content": "[13] S. A. Tegos, P. D. Diamantoulakis, Z. Ding, and G. K. Karagiannidis, \"Minimum data rate maximization for uplink pinching-antenna systems,\" IEEE Wireless Commun. Lett., (to appear in 2025) Available on-line at arXiv:2412.13892." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.78, + 0.921, + 0.813 + ], + "angle": 0, + "content": "[14] M. Sun, C. Ouyang, S. Wu, and Y. Liu, \"Physical layer security for pinching-antenna systems (PASS),\" IEEE Commun. Lett., (submitted) Available on-line at arXiv:2503.09075." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.814, + 0.921, + 0.847 + ], + "angle": 0, + "content": "[15] Y. Qin, Y. Fu, and H. Zhang, \"Joint antenna position and transmit power optimization for pinching antenna-assisted ISAC systems,\" IEEE Commun. 
Lett., (submitted) Available on-line at arXiv:2503.12872." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.848, + 0.921, + 0.893 + ], + "angle": 0, + "content": "[16] F. Liu, Y. Cui, C. Masouros, J. Xu, T. X. Han, Y. C. Eldar, and S. Buzzi, \"Integrated sensing and communications: Toward dual-functional wireless networks for 6G and beyond,\" IEEE J. Sel. Areas Commun., vol. 40, no. 6, pp. 1728-1767, 2022." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.894, + 0.921, + 0.928 + ], + "angle": 0, + "content": "[17] T. Jia and R. M. Buehrer, “A new cramer-rao lower bound for TOA-based localization,” in Proc. Military Commun. Conf. (MILCOM 2008), Nov. 2008, pp. 1-5." + }, + { + "type": "list", + "bbox": [ + 0.508, + 0.327, + 0.922, + 0.928 + ], + "angle": 0, + "content": null + } + ] +] \ No newline at end of file diff --git a/data/2025/2504_05xxx/2504.05792/2e1d882c-c0bf-4c6f-98d8-3ae7d8fb9f26_origin.pdf b/data/2025/2504_05xxx/2504.05792/2e1d882c-c0bf-4c6f-98d8-3ae7d8fb9f26_origin.pdf new file mode 100644 index 0000000000000000000000000000000000000000..8b96dcbdcc3f5d47eadeaa00d0ac89f2338a9dc8 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05792/2e1d882c-c0bf-4c6f-98d8-3ae7d8fb9f26_origin.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a0c4348fab16e7367bc1a5a65714dca2086121d9f02796b0cad0528e883f7342 +size 391018 diff --git a/data/2025/2504_05xxx/2504.05792/full.md b/data/2025/2504_05xxx/2504.05792/full.md new file mode 100644 index 0000000000000000000000000000000000000000..3e9cb1ebd08836d4267078c141cfb83e03e9e513 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05792/full.md @@ -0,0 +1,275 @@ +# Pinching-Antenna Assisted ISAC: A CRLB Perspective + +Zhiguo Ding, Fellow, IEEE + +Abstract—Recently, pinching antennas have attracted significant research interest due to their capability to reconfigure wireless channels as well as their array configuration flexibility. 
This letter focuses on how these features can be used to support integrated sensing and communications (ISAC) from the Cramér-Rao lower bound (CRLB) perspective. In particular, the CRLB achieved by pinching antennas is first derived and then compared to that of conventional antennas. The presented analytical and simulation results demonstrate that using pinching antennas can significantly reduce CRLB and, hence, enhance positioning accuracy. In addition, this letter also reveals that the low-cost and reconfigurability features of pinching antennas can be utilized to realize flexible user-centric positioning. + +Index Terms—Pinching antennas, integrated sensing and communications (ISAC), Cramér-Rao lower bound (CRLB), estimation theory. + +# I. INTRODUCTION + +Recently, pinching antennas have received significant attention from both academia and industry as a novel evolution of smart antennas, and offer three distinguished features [1], [2]. One is their capability to create strong line-of-sight (LoS) links between the transceivers, which means that large-scale path losses and LoS blockage can be effectively mitigated by activating antennas close to users [3]. The second feature is the reconfigurability of pinching-antenna systems, where the topology of a pinching-antenna array, e.g., the locations and the number of pinching antennas, can be flexibly adjusted. The third feature is their practicality, where DOCOMO's prototype shows that pinching antennas can be straightforwardly implemented in a low-cost manner [1]. + +In the literature, there already exists a large amount of work to demonstrate that the use of pinching antennas can significantly enhance the communication functionality of wireless networks. For example, the fundamental issues of pinching antennas, such as antenna activation, the architecture of a pinching-antenna array, and the array gains, have been investigated in [4]–[6]. 
Antenna placement is key to realizing the full potential of pinching-antenna systems, where various designs and their impact on the system throughput have been investigated in [7], [8]. Channel estimation and beam training are crucial issues to pinching-antenna systems, and sophisticated designs using the flexibility features of pinching antennas have been developed in [9], [10]. For many resource allocation problems encountered in pinching-antenna systems, the use of conventional convex optimization leads to high computational complexity, which motivates the application of advanced learning methods [11], [12]. The applications of pinching antennas to improve the uplink throughput and the security of communication networks have also been recently investigated in [13], [14]. + +However, we note that the impact of pinching antennas on the sensing functionality of wireless networks has not yet been fully characterized in the literature, although the recent work in [15] demonstrated the importance of pinching + +Z. Ding is with the University of Manchester, Manchester, M1 9BB, UK, and Khalifa University, Abu Dhabi, UAE. + +antennas in integrated sensing and communication (ISAC) systems [16], which motivates this letter. In particular, in this letter, the Cramér-Rao lower bound (CRLB) is used as the performance metric to characterize the capability of pinching antennas for enhancing the positioning accuracy of ISAC networks. The CRLB achieved by pinching antennas is first derived in the letter, and then compared to conventional antennas. The presented analytical results reveal that the use of pinching antennas can ensure that users at different locations experience uniform positioning accuracy, whereas the use of conventional antennas can result in a significant disparity in accuracy among the users. In addition, the important properties of CRLB achieved by pinching antennas, such as the effects of antenna placement and the local maximums of CRLB, are also investigated. 
Furthermore, this letter also reveals that the low-cost and reconfigurability features of pinching antennas can be utilized to realize flexible user-centric positioning. + +# II. SYSTEM MODEL + +Consider a pinching-antenna system that is deployed to provide ISAC services to $M$ single-antenna users, denoted by $\mathrm{U}_m$ . Given the fact that there is already a rich literature on using pinching antennas to enhance communications, and also due to space limitations, the impact of pinching antennas on the sensing functionality is focused on in this letter. Without loss of generality, assume that $N$ pinching antennas are activated on $N_{\mathrm{WG}}$ waveguides. The location of the $n$ -th pinching antenna is denoted by $\psi_n^{\mathrm{Pin}} = (x_n^{\mathrm{Pin}}, y_n^{\mathrm{Pin}}, d_{\mathrm{H}})$ , where $d_{\mathrm{H}}$ denotes the height of the waveguides. + +The service area is denoted by $\mathcal{A}$ and is assumed to be a rectangle with its two sides denoted by $D_{\mathrm{W}}$ and $D_{\mathrm{L}}$ , respectively, and its center located at $(0,0,0)$ . The users are assumed to be uniformly distributed in $\mathcal{A}$ , and $\mathrm{U}_m$ 's location is denoted by $\psi_m = (x_m,y_m,0)$ . + +Denote the distance from the $n$ -th pinching antenna to the $m$ -th user by $d_{mn}$ . Distance (range) estimates for the $m$ -th user can be modeled as follows: [17] + +$$ +\hat {d} _ {m n} = d _ {m n} + w _ {m n}, \tag {1} +$$ + +where $d_{mn} = \sqrt{(x_m - x_n^{\mathrm{Pin}})^2 + (y_m - y_n^{\mathrm{Pin}})^2 + d_{\mathrm{H}}^2}$ , and $w_{mn}$ is a zero-mean Gaussian distributed noise term whose variance is distance-dependent, i.e., + +$$ +\sigma_ {m n} ^ {2} = K _ {E} \left(\left(x _ {m} - x _ {n} ^ {\mathrm {P i n}}\right) ^ {2} + \left(y _ {m} - y _ {n} ^ {\mathrm {P i n}}\right) ^ {2} + d _ {\mathrm {H}} ^ {2}\right), \tag {2} +$$ + +$K_{E}$ denotes a system parameter decided by the range estimation environment. + +# III. 
IMPACT OF PINCHING ANTENNAS ON POSITIONING + +# A. CRLB Achieved by Pinching-Antenna Systems + +Without loss of generality, the impact of pinching antennas on $\mathrm{U}_m$ 's localization is focused on. The joint probability den + +sity function (pdf) of $\hat{d}_{mn}$ conditioned on $d_{mn}$ , $1\leq n\leq N$ , is given by + +$$ +f (\hat {d} _ {m 1}, \dots , \hat {d} _ {m N}) = \prod_ {n = 1} ^ {N} \frac {1}{\sqrt {2 \pi \sigma_ {m n} ^ {2}}} e ^ {- \frac {(\hat {d} _ {m n} - d _ {m n}) ^ {2}}{2 \sigma_ {m n} ^ {2}}}, \tag {3} +$$ + +whose log-likelihood function is given by + +$$ +\begin{array}{l} L \triangleq \ln f (\hat {d} _ {m 1}, \dots , \hat {d} _ {m N}) = - \frac {N}{2} \ln (2 \pi) \tag {4} \\ - \sum_ {n = 1} ^ {N} \ln \sigma_ {m n} - \sum_ {n = 1} ^ {N} \frac {(\hat {d} _ {m n} - d _ {m n}) ^ {2}}{2 \sigma_ {m n} ^ {2}}. \\ \end{array} +$$ + +Recall that the CRLB for $x_{m}$ and $y_{m}$ is given by + +$$ +\mathcal {E} \left\{\left(\hat {x} _ {m} - x _ {m}\right) ^ {2} + \left(\hat {y} _ {m} - y _ {m}\right) ^ {2} \right\} \geq \frac {1}{J _ {x} ^ {m}} + \frac {1}{J _ {y} ^ {m}} \triangleq \mathrm {C R B} _ {m}, \tag {5} +$$ + +where $\hat{x}_m$ and $\hat{y}_m$ denote the estimates of $x_m$ and $y_m$ , respectively, $J_x^m = \mathcal{E}\left\{-\frac{\partial^2L}{\partial x_m^2}\right\}$ and $J_y^m = \mathcal{E}\left\{-\frac{\partial^2L}{\partial y_m^2}\right\}$ . + +$\frac{\partial L}{\partial x_m}$ can be obtained as follows: + +$$ +\begin{array}{l} \frac {\partial L}{\partial x _ {m}} = - \sum_ {n = 1} ^ {N} \frac {1}{\sigma_ {m n}} \frac {\partial \sigma_ {m n}}{\partial x _ {m}} - \sum_ {n = 1} ^ {N} \frac {\left(d _ {m n} - \hat {d} _ {m n}\right)}{\sigma_ {m n} ^ {2}} \frac {\partial d _ {m n}}{\partial x _ {m}} \tag {6} \\ + \sum_ {n = 1} ^ {N} \frac {(\hat {d} _ {m n} - d _ {m n}) ^ {2}}{\sigma_ {m n} ^ {3}} \frac {\partial \sigma_ {m n}}{\partial x _ {m}}. 
\\ \end{array} +$$ + +The expression of $\frac{\partial^2L}{\partial x_m^2}$ is quite involved; however, by using the fact that $\mathcal{E}\{\hat{d}_{mn} - d_{mn}\} = 0$ and following the steps similar to those in [17], the expectation of $\frac{\partial^2L}{\partial x_m^2}$ , i.e., $J_x^m$ , can be obtained as follows: + +$$ +J _ {x} ^ {m} = \sum_ {n = 1} ^ {N} \frac {\left(2 K _ {E} + 1\right)}{\sigma_ {m n} ^ {2}} \frac {\left(x _ {m} - x _ {n} ^ {\operatorname {P i n}}\right) ^ {2}}{\left(x _ {m} - x _ {n} ^ {\operatorname {P i n}}\right) ^ {2} + \left(y _ {m} - y _ {n} ^ {\operatorname {P i n}}\right) ^ {2} + d _ {\mathrm {H}} ^ {2}}. \tag {7} +$$ + +$J_{y}^{m}$ can be obtained in a similar form, which means that the CRLB for estimating $\mathrm{U}_m$ 's location can be expressed as follows: + +$$ +\begin{array}{l} \mathrm {C R B} _ {m} = \frac {K _ {E}}{(2 K _ {E} + 1)} \left(\frac {1}{\sum_ {n = 1} ^ {N} \frac {\left(x _ {m} - x _ {n} ^ {\mathrm {P i n}}\right) ^ {2}}{\left(\left(x _ {m} - x _ {n} ^ {\mathrm {P i n}}\right) ^ {2} + \left(y _ {m} - y _ {n} ^ {\mathrm {P i n}}\right) ^ {2} + d _ {\mathrm {H}} ^ {2}\right) ^ {2}}} \right. \\ \left. + \frac {1}{\sum_ {n = 1} ^ {N} \frac {\left(y _ {m} - y _ {n} ^ {\operatorname* {P i n}}\right) ^ {2}}{\left(\left(x _ {m} - x _ {n} ^ {\operatorname* {P i n}}\right) ^ {2} + \left(y _ {m} - y _ {n} ^ {\operatorname* {P i n}}\right) ^ {2} + d _ {\mathrm {H}} ^ {2}\right) ^ {2}}}\right). \tag {8} \\ \end{array} +$$ + +# B. Performance Analysis Based on CRLB + +1) Performance Gain over Conventional Antennas: For the conventional-antenna benchmark, consider the use of a circular antenna array with its center located at $(0,0,0)$ and its radius being $\frac{\lambda}{4\sin\left(\frac{\pi}{N}\right)}$ , which ensures that the minimal pairwise distance of the antennas is $\frac{\lambda}{2}$ , where $\lambda$ denotes the wavelength. 
By using the fact that the users are uniformly distributed within the service area, the performance gain of pinching antennas over conventional antennas can be evaluated as follows: + +$$ +\Delta_ {\mathrm {C R B}} = \int_ {- \frac {D _ {\mathrm {L}}}{2}} ^ {\frac {D _ {\mathrm {L}}}{2}} \int_ {- \frac {D _ {\mathrm {W}}}{2}} ^ {\frac {D _ {\mathrm {W}}}{2}} \left(\mathrm {C R B} _ {m} - \mathrm {C R B} _ {m} ^ {\text {C o n v}}\right) \frac {d y _ {m}}{D _ {\mathrm {W}}} \frac {d x _ {m}}{D _ {\mathrm {L}}}, \tag {9} +$$ + +where $\mathrm{CRB}_m^{\mathrm{Conv}}$ can be obtained similarly to $\mathrm{CRB}_m$ by replacing the locations of the pinching antennas with those of the conventional antennas. The performance gain in (9) can be straightforwardly evaluated via computer simulations, but a closed-form expression of $\Delta_{\mathrm{CRB}}$ is difficult to obtain due to the factional expression of the CRLB. We note that the performance gain of pinching antennas over conventional antennas can also be illustrated by simply focusing on the user which is located at $\left(\frac{D_{\mathrm{L}}}{2},0,0\right)$ . The use of conventional antennas can achieve the following CRLB: + +$$ +\begin{array}{l} \mathrm {C R B} _ {m} ^ {\mathrm {C o n v}} = \left(\frac {1}{\sum_ {n = 1} ^ {N} \frac {\left(\frac {D _ {\mathrm {L}}}{2} - x _ {n} ^ {\mathrm {C o n v}}\right) ^ {2}}{\left(\left(\frac {D _ {\mathrm {L}}}{2} - x _ {n} ^ {\mathrm {C o n v}}\right) ^ {2} + (y _ {n} ^ {\mathrm {C o n v}}) ^ {2} + d _ {\mathrm {H}} ^ {2}\right) ^ {2}}} \right. \\ \left. 
+ \frac {1}{\sum_ {n = 1} ^ {N} \frac {\left(y _ {n} ^ {\text {C o n v}}\right) ^ {2}}{\left(\left(\frac {D _ {\mathrm {L}}}{2} - x _ {n} ^ {\text {C o n v}}\right) ^ {2} + \left(y _ {n} ^ {\text {C o n v}}\right) ^ {2} + d _ {\mathrm {H}} ^ {2}\right) ^ {2}}}\right) \frac {K _ {E}}{\left(2 K _ {E} + 1\right)} \\ \stackrel {(a)} {\approx} \frac {K _ {E}}{(2 K _ {E} + 1)} \left(\frac {4 \left(\frac {D _ {\mathrm {L}} ^ {2}}{4} + d _ {\mathrm {H}} ^ {2}\right) ^ {2}}{N D _ {\mathrm {L}} ^ {2}} + \frac {\left(\frac {D _ {\mathrm {L}} ^ {2}}{4} + d _ {\mathrm {H}} ^ {2}\right) ^ {2}}{\sum_ {n = 1} ^ {N} (y _ {n} ^ {\mathrm {C o n v}}) ^ {2}}\right) \\ \xrightarrow {(b)} \infty , \tag {10} \\ \end{array} +$$ + +where step (a) is due to the fact that the conventional antennas are clustered close to the center of the service area, and step (b) is due to the fact that $|y_{n}^{\mathrm{Conv}}| \to 0$ for conventional antennas, particularly for the case with high carrier frequencies (i.e., small wavelengths). + +On the other hand, pinching antennas do not suffer the singularity issue experienced by conventional antennas. For example, for the user located at $\left(\frac{D_{\mathrm{L}}}{2},0,0\right)$ , the corresponding CRLB can be expressed as follows: + +$$ +\begin{array}{l} \mathrm {C R B} _ {m} = \frac {K _ {E}}{\left(2 K _ {E} + 1\right)} \left(\frac {1}{\sum_ {n = 1} ^ {N} \frac {\left(\frac {D _ {\mathrm {L}}}{2} - x _ {n} ^ {\mathrm {P i n}}\right) ^ {2}}{\left(\left(\frac {D _ {\mathrm {L}}}{2} - x _ {n} ^ {\mathrm {P i n}}\right) ^ {2} + (y _ {n} ^ {\mathrm {P i n}}) ^ {2} + d _ {\mathrm {H}} ^ {2}\right) ^ {2}}} \right. \\ \left. + \frac {1}{\sum_ {n = 1} ^ {N} \frac {\left(y _ {n} ^ {\mathrm {P i n}}\right) ^ {2}}{\left(\left(\frac {D _ {\mathrm {L}}}{2} - x _ {n} ^ {\mathrm {P i n}}\right) ^ {2} + \left(y _ {n} ^ {\mathrm {P i n}}\right) ^ {2} + d _ {\mathrm {H}} ^ {2}\right) ^ {2}}}\right). 
\tag {11} \\ \end{array} +$$ + +For illustrative purposes, a simple upper bound on the CRLB achieved by pinching antennas can be obtained as follows: + +$$ +\begin{array}{l} \mathrm {C R B} _ {m} \leq \frac {K _ {E}}{\left(2 K _ {E} + 1\right)} \left(\frac {\left(\left(\frac {D _ {\mathrm {L}}}{2} - x _ {n} ^ {\operatorname {P i n}}\right) ^ {2} + \left(y _ {n} ^ {\operatorname {P i n}}\right) ^ {2} + d _ {\mathrm {H}} ^ {2}\right) ^ {2}}{\left(\frac {D _ {\mathrm {L}}}{2} - x _ {n} ^ {\operatorname {P i n}}\right) ^ {2}} \right. \\ \left. + \frac {\left(\left(\frac {D _ {\mathrm {L}}}{2} - x _ {n} ^ {\text {P i n}}\right) ^ {2} + \left(y _ {n} ^ {\text {P i n}}\right) ^ {2} + d _ {\mathrm {H}} ^ {2}\right) ^ {2}}{\left(y _ {n} ^ {\text {P i n}}\right) ^ {2}}\right), \tag {12} \\ \end{array} +$$ + +where $n$ is an arbitrary integer between 1 and $N$ . Because of the diverse locations of the $N$ pinching antennas, it is always possible to find $n \in \{1, \dots, N\}$ which yields a finite value for the upper bound shown in (12), i.e., the CRLB achieved by pinching antennas is always bounded. + +Remark 1: Unlike conventional antennas which can cause noticeable accuracy variations between users, the carried-out case study shows that pinching antennas have the ability to offer uniform positioning accuracy between the users. + +2) Flexible User-Centric Positioning: Due to their low-cost and reconfigurability features, the locations of pinching antennas can be tailored to a serving user for realizing flexible user-centric positioning. To facilitate the performance analysis, the association between the pinching antennas and the waveguides is required. Without loss of generality, assume that there are $\tilde{N} = \frac{N}{N_{\mathrm{WG}}}$ pinching antennas on each waveguide. Denote the location of the $n$ -th antenna on the $i$ -th waveguide by $\psi_{in}^{\mathrm{Pin}} = (x_{in}^{\mathrm{Pin}},y_{in}^{\mathrm{Pin}},d_{\mathrm{H}})$ . 
Furthermore, assume that the antennas are equally spaced, and define $\Delta_x = |x_{in}^{\mathrm{Pin}} - x_{im}^{\mathrm{Pin}}|$ and $\Delta_y = |y_{in}^{\mathrm{Pin}} - y_{jn}^{\mathrm{Pin}}|$ , $m\neq n$ and $i\neq j$ . + +For illustrative purposes, assume that all $N$ pinching antennas are activated in a square area with $\mathrm{U}_m$ at its center, where $\tilde{N} = N_{\mathrm{WG}}$ and $\Delta_x = \Delta_y = \Delta$ . This assumption is made to facilitate the performance analysis, and more practical setups will be considered in the simulation section. Define $\bar{N} = \frac{\tilde{N}}{2}$ , and without loss of generality, assume that $\bar{N}$ is an even number. + +With these assumptions, the CRLB in (8) can be simplified as follows: + +$$ +\mathrm {C R B} _ {m} = \frac {\frac {K _ {E} \Delta^ {2}}{4 (2 K _ {E} + 1)}}{\sum_ {i = 1} ^ {\bar {N}} \sum_ {n = 1} ^ {\bar {N}} \frac {(n - \frac {1}{2}) ^ {2}}{\beta_ {n i} ^ {2}}} + \frac {\frac {K _ {E} \Delta^ {2}}{4 (2 K _ {E} + 1)}}{\sum_ {i = 1} ^ {\bar {N}} \sum_ {n = 1} ^ {\bar {N}} \frac {(i - \frac {1}{2}) ^ {2}}{\beta_ {n i} ^ {2}}}, +$$ + +where $\beta_{ni} = \left(n - \frac{1}{2}\right)^{2} + \left(i - \frac{1}{2}\right)^{2} + \frac{d_{\mathrm{H}}^{2}}{\Delta^{2}}$ . The above CRLB can be used to design the antenna placement, i.e., the optimal choice of $\Delta$ for minimizing the CRLB. Computer simulations can be used to verify that $\frac{\partial^2\mathrm{CRB}_m}{\partial\Delta^2} > 0$ , i.e., $\mathrm{CRB}_m$ is a convex function of $\Delta$ , and hence convex optimization solvers can be used to find the optimal solution of $\Delta$ efficiently. To obtain an insightful understanding of the optimal choice of $\Delta$ , a special case with $N = 4$ is focused on in the following. We note that this special case is important in practice, given the fact that using a small number of antennas is helpful in reducing system overhead. 
For the case with $N = 4$ , the CRLB can be simplified as follows: + +$$ +\mathrm {C R B} _ {m} = \frac {2 K _ {E} \Delta^ {2}}{\left(2 K _ {E} + 1\right)} \left(\frac {1}{2} + \frac {d _ {\mathrm {H}} ^ {2}}{\Delta^ {2}}\right) ^ {2}, \tag {13} +$$ + +whose first-order derivative is given by + +$$ +\frac {\partial \mathrm {C R B} _ {m}}{\partial \Delta} = \frac {4 K _ {E}}{(2 K _ {E} + 1)} \left(\frac {1}{2} \Delta + \frac {d _ {\mathrm {H}} ^ {2}}{\Delta}\right) \left(\frac {1}{2} - \frac {d _ {\mathrm {H}} ^ {2}}{\Delta^ {2}}\right). \tag {14} +$$ + +The second-order derivative of $\mathrm{CRB}_m$ is given by + +$$ +\frac {\partial^ {2} \mathrm {C R B} _ {m}}{\partial \Delta^ {2}} = \frac {4 K _ {E}}{\left(2 K _ {E} + 1\right)} \left(\frac {1}{4} + 3 \frac {d _ {\mathrm {H}} ^ {4}}{\Delta^ {4}}\right) > 0, \tag {15} +$$ + +which means that $\mathrm{CRB}_m$ is a convex function of $\Delta$ . Therefore, the optimal solution of $\Delta$ for minimizing the CRLB for the special case with $N = 4$ is given by + +$$ +\Delta^ {*} = \sqrt {2} d _ {H}. \tag {16} +$$ + +Remark 2: An intuition is that the CRLB is minimized if all the antennas are placed as close to the user as possible, i.e., $\Delta^{*} \to 0$ (or $\frac{\lambda}{2}$ to avoid antenna coupling). (16) shows that this intuition is wrong, where the optimal antenna spacing is a function of the height of the waveguides. + +3) Local-Maximum Property of CRLB: In the proximity of each pinching antenna, $\psi_{n}^{\mathrm{Pin}}$ , there exists a local maximum of $\mathrm{CRB}_m$ shown in (8). This local-maximum property can be revealed by studying $\frac{\partial\mathrm{CRB}_m}{\partial x_m}$ and $\frac{\partial\mathrm{CRB}_m}{\partial y_m}$ . 
Without loss of generality, $\frac{\partial\mathrm{CRB}_m}{\partial x_m}$ is focused on, and can be expressed as follows: + +$$ +\frac {\partial \mathrm {C R B} _ {m}}{\partial x _ {m}} = \frac {K _ {E}}{(2 K _ {E} + 1)} \left(- \frac {1}{\gamma_ {1} ^ {2}} [ \gamma_ {2} - \gamma_ {3} ] + \frac {1}{\gamma_ {4} ^ {2}} \gamma_ {5}\right), \tag {17} +$$ + +where $d_{mn}^2 = \left(x_m - x_n^{\mathrm{Pin}}\right)^2 +\left(y_m - y_n^{\mathrm{Pin}}\right)^2 +d_{\mathrm{H}}^2$ , $\gamma_{1} = \sum_{n = 1}^{N}\frac{\left(x_{m} - x_{n}^{\mathrm{Pin}}\right)^{2}}{d_{mn}^{4}}$ , $\gamma_{2} = \sum_{n = 1}^{N}\frac{2\left(x_{m} - x_{n}^{\mathrm{Pin}}\right)}{d_{mn}^{4}}$ , $\gamma_{3} = \sum_{n = 1}^{N}\frac{4\left(x_m - x_n^{\mathrm{Pin}}\right)^3}{d_{mn}^6}$ , $\gamma_4 = \sum_{n = 1}^{N}\frac{\left(y_m - y_n^{\mathrm{Pin}}\right)^2}{d_{mn}^4}$ , and $\gamma_5 = \sum_{n = 1}^{N}\frac{4\left(x_m - x_n^{\mathrm{Pin}}\right)\left(y_m - y_n^{\mathrm{Pin}}\right)^2}{d_{mn}^6}$ . + +Without loss of generality, assume that $\mathrm{U}_m$ is in the proximity of the first pinching antenna on the first waveguide, i.e., $x_{m} = x_{11}^{\mathrm{Pin}} + \delta_{x}$ and $y_{m} = y_{11}^{\mathrm{Pin}} + \delta_{y}$ , where $\delta_x\to 0$ and $\delta_y\rightarrow 0$ . In this case, $\gamma_{1}$ in (17) can be approximated as follows: + +$$ +\gamma_ {1} \approx \sum_ {i = 1} ^ {\frac {N}{\tilde {N}}} \sum_ {n = 1} ^ {\tilde {N} - 1} \frac {n ^ {2} \Delta_ {x} ^ {2}}{\left(n ^ {2} \Delta_ {x} ^ {2} + (i - 1) ^ {2} \Delta_ {y} ^ {2} + d _ {\mathrm {H}} ^ {2}\right) ^ {2}}, \tag {18} +$$ + +where the terms at the order of $\delta_x^2$ are omitted. 
Similarly, by omitting the terms of $\delta_x^2$ , $\gamma_2$ can be approximated as follows: + +$$ +\begin{array}{l} \gamma_ {2} \approx \sum_ {i = 1} ^ {\frac {N}{\tilde {N}}} \frac {2 \delta_ {x}}{\left(\delta_ {x} ^ {2} + (i - 1) ^ {2} \Delta_ {y} ^ {2} + d _ {\mathrm {H}} ^ {2}\right) ^ {2}} \tag {19} \\ - \sum_ {i = 1} ^ {\frac {N}{\tilde {N}}} \sum_ {n = 1} ^ {\tilde {N} - 1} \frac {2 n \Delta_ {x}}{\left(n ^ {2} \Delta_ {x} ^ {2} + (i - 1) ^ {2} \Delta_ {y} ^ {2} + d _ {\mathrm {H}} ^ {2}\right) ^ {2}}. \\ \end{array} +$$ + +Similarly, $\gamma_3, \gamma_4$ and $\gamma_5$ can be approximated as follows: + +$$ +\gamma_ {3} \approx - \sum_ {i = 1} ^ {\frac {N}{\tilde {N}}} \sum_ {n = 1} ^ {\tilde {N} - 1} \frac {4 n ^ {3} \Delta_ {x} ^ {3}}{\left(n ^ {2} \Delta_ {x} ^ {2} + (i - 1) ^ {2} \Delta_ {y} ^ {2} + d _ {\mathrm {H}} ^ {2}\right) ^ {3}}, \tag {20} +$$ + +$$ +\gamma_ {4} \approx \sum_ {i = 1} ^ {\frac {N}{\tilde {N}} - 1} \sum_ {n = 1} ^ {\tilde {N}} \frac {i ^ {2} \Delta_ {y} ^ {2}}{\left((n - 1) ^ {2} \Delta_ {x} ^ {2} + i ^ {2} \Delta_ {y} ^ {2} + d _ {\mathrm {H}} ^ {2}\right) ^ {2}}, \tag {21} +$$ + +$$ +\gamma_ {5} \approx - \sum_ {i = 1} ^ {\frac {N}{\tilde {N}} - 1} \sum_ {n = 1} ^ {\tilde {N} - 1} \frac {4 n \Delta_ {x} i ^ {2} \Delta_ {y} ^ {2}}{\left(n ^ {2} \Delta_ {x} ^ {2} + i ^ {2} \Delta_ {y} ^ {2} + d _ {\mathrm {H}} ^ {2}\right) ^ {3}}. \tag {22} +$$ + +To facilitate the analysis of this local-maximum property of CRLB, assume that $\Delta_x = \Delta_y = \Delta \gg d_{\mathrm{H}}$ and $\tilde{N} = \frac{N}{\tilde{N}}$ , which + +means that $\gamma_{1} = \gamma_{3}$ , and hence the CRLB can be simplified as follows: + +$$ +\begin{array}{l} \frac {\partial \mathrm {C R B} _ {m}}{\partial x _ {m}} \approx \frac {\gamma_ {1} K _ {E}}{(2 K _ {E} + 1)} \left[ - \sum_ {i = 1} ^ {\frac {N}{\tilde {N}}} \frac {2 \delta_ {x}}{(\delta_ {x} ^ {2} + (i - 1) ^ {2} \Delta^ {2} + d _ {\mathrm {H}} ^ {2}) ^ {2}} \right. \\ \left. 
+ \sum_ {i = 1} ^ {\frac {N}{\tilde {N}}} \sum_ {n = 1} ^ {\tilde {N} - 1} \frac {2 n \Delta}{\bar {\beta} _ {n i} ^ {2}} - \sum_ {i = 1} ^ {\frac {N}{\tilde {N}}} \sum_ {n = 1} ^ {\tilde {N} - 1} \frac {4 n ^ {3} \Delta^ {3}}{\bar {\beta} _ {n i} ^ {3}} - \sum_ {i = 1} ^ {\frac {N}{\tilde {N}} - 1} \sum_ {n = 1} ^ {\tilde {N} - 1} \frac {4 n ^ {3} \Delta^ {3}}{\bar {\beta} _ {n i} ^ {3}} \right], \\ \end{array} +$$ + +where $\bar{\beta}_{ni} = (n^2 + (i - 1)^2)\Delta^2 + d_{\mathrm{H}}^2$ . + +Note that if $i = \frac{N}{\tilde{N}}$ , $\sum_{n=1}^{\tilde{N}-1} \frac{4n^3\Delta^3}{\left((n^2+(i-1)^2)\Delta^2+d_{\mathrm{H}}^2\right)^3}$ is an insignificant term, which means that the CRLB can be further simplified as follows: + +$$ +\begin{array}{l} \frac {\partial \mathrm {C R B} _ {m}}{\partial x _ {m}} \approx \frac {\gamma_ {1} K _ {E}}{(2 K _ {E} + 1)} \left[ - \sum_ {i = 1} ^ {\frac {N}{\tilde {N}}} \frac {2 \delta_ {x}}{(\delta_ {x} ^ {2} + (i - 1) ^ {2} \Delta^ {2} + d _ {\mathrm {H}} ^ {2}) ^ {2}} \right. \\ \left. + 2 \Delta \sum_ {i = 1} ^ {\frac {N}{\tilde {N}}} \sum_ {n = 1} ^ {\tilde {N} - 1} \frac {n \left(\left((i - 1) ^ {2} - 3 n ^ {2}\right) \Delta^ {2} + d _ {\mathrm {H}} ^ {2}\right)}{\left(\left(n ^ {2} + (i - 1) ^ {2}\right) \Delta^ {2} + d _ {\mathrm {H}} ^ {2}\right) ^ {3}} \right]. \tag {23} \\ \end{array} +$$ + +For the case with $\delta_x = 0$ , i.e., the user is located right underneath the pinching antenna at $\psi_{11}^{\mathrm{Pin}}$ , by using the assumption that $\Delta \gg d_{\mathrm{H}}$ , the CRLB can be expressed as follows: + +$$ +\frac {\partial \mathrm {C R B} _ {m}}{\partial x _ {m}} \approx \frac {\gamma_ {1} K _ {E}}{(2 K _ {E} + 1)} \frac {2}{\Delta^ {3}} \gamma_ {6}, \tag {24} +$$ + +where $\gamma_6 = \sum_{i=1}^{\frac{N}{\tilde{N}}} \sum_{n=1}^{\tilde{N}-1} \frac{(i-1)^2 - 3n^2}{(n^2 + (i-1)^2)^3}$ . 
We note that the terms of $\gamma_6$ decay rapidly with increasing $n$ and $i$ , i.e., $\gamma_6$ can be approximated by keeping the dominant negative term ( $n = 1$ and $i = 1$ ) and the dominant positive term ( $n = 1$ and $i = 3$ ), i.e., $\gamma_6 \approx -3 + \frac{1}{125}$ , which means $\frac{\partial \mathrm{CRB}_m}{\partial x_m} \leq 0$ for the case with $\delta_x = 0$ . For the case of $\delta_x \neq 0$ , the CRLB can be approximated as follows: + +$$ +\frac {\partial \mathrm {C R B} _ {m}}{\partial x _ {m}} \approx \frac {\gamma_ {1} K _ {E}}{(2 K _ {E} + 1)} \left[ - \frac {2 \delta_ {x}}{d _ {\mathrm {H}} ^ {4}} + \frac {2}{\Delta^ {3}} \gamma_ {6} \right]. +$$ + +Due to the assumption of $\Delta \gg d_{\mathrm{H}}$ , the term $\frac{2\delta_x}{d_{\mathrm{H}}^4}$ is dominant, and hence $\frac{\partial\mathrm{CRB}_m}{\partial x_m} >0$ if $\delta_{x} < 0$ . In summary, $\frac{\partial\mathrm{CRB}_m}{\partial x_m} < 0$ if the user's location is $(x_{11}^{\mathrm{Pin}},y_{11}^{\mathrm{Pin}},0)$ , and $\frac{\partial\mathrm{CRB}_m}{\partial x_m} >0$ if the user's location is $(x_{11}^{\mathrm{Pin}} + \delta_x,y_{11}^{\mathrm{Pin}} + \delta_y,d_{\mathrm{H}})$ . A similar conclusion can be established for $\frac{\partial\mathrm{CRB}_m}{\partial y_m}$ , which means that there exists a local maximum for the CRLB around $\psi_{n}^{\mathrm{Pin}}$ . + +Remark 3: The local maximum property of the CRLB indicates an interesting conflict between the communication and sensing functionalities of pinching antennas. In particular, placing a pinching antenna directly above the user might increase the user's data rate but also degrade positioning accuracy. In other words, this local maximum property reveals the importance of antenna placement in pinching-antenna assisted ISAC networks. + +# IV. 
NUMERICAL STUDIES + +In this section, computer simulation results are presented to demonstrate the impact of pinching antennas on the positioning accuracy, where $K_{E} = 0.01$ , $D_{\mathrm{W}} = 10 \mathrm{~m}$ and $D_{\mathrm{L}} = 40 \mathrm{~m}$ , unless stated otherwise. + +![](images/de2fd461e68d196df1294074a08a6e8f1ae03b5be4657fc9a3ee1fe1bcd21315.jpg) +Fig. 1. Averaged CRLBs, $\mathrm{CRB}_m$ , achieved by the considered antenna systems, where $N_{\mathrm{WG}} = 2$ and $d = 3\mathrm{m}$ . For the pinching-antenna system, on each waveguide, there are $\frac{N}{N_{\mathrm{WG}}}$ antennas, which are equally spaced. Due to the singularity issue experienced by conventional antennas discussed in Section III-B1, users are assumed to be excluded from a square area with its side being $a$ and its center at the origin. + +![](images/8c86d7c82be13cfdb700185fedbcfc043903bcacc188a31c96bd78c0d022b95d.jpg) +(a) Conventional Antennas + +![](images/1471639b0f119a70aea449f2c23ef35cea4d5de252869d12349b3c54fafbf1c3.jpg) +(b) Pinching Antennas +Fig. 2. CRLBs achieved by the considered antenna systems. $N = 20$ , $N_{\mathrm{WG}} = 2$ and $d = 3 \, \mathrm{m}$ . On each waveguide, there are $\frac{N}{N_{\mathrm{WG}}}$ antennas, which are equally spaced. + +In Fig. 1, the averaged CRLBs achieved by the conventional and pinching-antenna systems are shown as functions of the number of antennas, where $\mathrm{U}_m$ is assumed to be uniformly deployed in the service area. Because the conventional-antenna system suffers the singularity issue discussed in Section III-B1, it is assumed that $\mathrm{U}_m$ cannot be located in a square area with its side being $a$ and its center at the origin. As can be seen from Fig. 1, the use of pinching antennas yields a significant performance gain over conventional antennas, regardless of the choices of $N$ and $a$ . + +Fig. 2 is provided to highlight the fact that a user's positioning accuracy depends on its location. On the one hand, Fig. 
2(a) shows that for conventional antennas, a user can experience extremely poor positioning accuracy if it is located far away from the center of the service area, which + +![](images/214063bb72dbd9a11d4eb4e79e473fdc96c3ca35a008baf61c81991492ed251a.jpg) +(a) Positioning with a focal point at $\left(-\frac{D_{\mathrm{L}}}{4},0,0\right)$ + +![](images/5959a4483a3d708e1fda07cb15e0ba8ae7a2653fca446613aace2299d7205a6c.jpg) +(b) Positioning with a focal point at $\left(\frac{D_{\mathrm{L}}}{4},0,0\right)$ +Fig. 3. Using pinching antennas to achieve flexible user-centric positioning. $N = 20$ , $N_{\mathrm{WG}} = 2$ and $d = 3 \mathrm{~m}$ . On each waveguide, there are $\frac{N}{N_{\mathrm{WG}}}$ antennas, which are equally spaced in a segment with its length being $\frac{D_{\mathrm{L}}}{2}$ and its center at the focal points shown in the figures. + +confirms the analytical results shown in (10). On the other hand, Fig. 2(b) shows that the use of pinching antennas ensures reasonably accurate positioning, regardless of whether the user is at the center or the edge of the service area. This also means that for the multi-user scenario, using pinching antennas can ensure fairness for the users' positioning accuracy. We note that in Fig. 2(b), local maximums are clearly visible in the proximity of the pinching antennas, which confirms the analysis shown in Section III-B3. + +Recall that one key feature of pinching antennas is their reconfiguration capabilities, where the number and the locations of the antennas can be changed in a flexible manner. Fig. 3 demonstrates how this reconfiguration feature can be used to achieve flexible user-centric positioning. In particular, Figs. 3(a) and 3(b) show that by activating the pinching antennas close to the intended user locations, different focal points can be realized, which means that users close to these focal points can enjoy high positioning accuracy. For the case where the pinching antennas are clustered close to a user, Fig. 
4 is provided to show the impact of the antenna spacing on the CRLB, where the accuracy of the analytical results developed in (16) is also verified. + +# V. CONCLUSIONS + +This letter investigated how the key features of pinching antennas can be used to support ISAC from the CRLB perspective. In particular, the CRLB achieved by pinching antennas was first derived and then compared to that of conventional antennas. The presented analytical and simulation results demonstrated that the use of pinching antennas can significantly reduce CRLB and, hence, enhance the sensing capability. In addition, this letter showed that the low-cost and + +![](images/95e52804193e9fcc17868d063866468da9677490c21f4494ad8893fb6d18da17.jpg) +Fig. 4. Impact of the antenna spacing on the CRLB. $N = 4$ pinching antennas are activated in a square-shape area with the antenna spacing being $\Delta$ and $\mathrm{U}_m$ located at the center of the area, where $N_{\mathrm{WG}} = 2$ . The analytical results are based on (16). + +reconfigurability features of pinching antennas can be utilized to realize flexible user-centric positioning. + +# REFERENCES + +[1] A. Fukuda, H. Yamamoto, H. Okazaki, Y. Suzuki, and K. Kawai, "Pinching antenna - using a dielectric waveguide as an antenna," NTT DOCOMO Technical J., vol. 23, no. 3, pp. 5-12, Jan. 2022. +[2] Z. Ding, R. Schober, and H. V. Poor, "Flexible-antenna systems: A pinching-antenna perspective," IEEE Trans. Commun., (to appear in 2025) Available on-line at arXiv:2412.02376. +[3] Z. Ding and H. V. Poor, “Los blockage in pinching-antenna systems: Curse or blessing?” IEEE Wireless Commun. Lett., (submitted) Available on-line at arXiv:2503.08554. +[4] K. Wang, Z. Ding, and R. Schober, "Antenna activation for NOMA assisted pinching-antenna systems," IEEE Commun. Lett., (to appear in 2025) Available on-line at arXiv:2412.13969. +[5] C. Ouyang, Z. Wang, Y. Liu, and Z. Ding, "Array gain for pinching-antenna systems (PASS)," IEEE Commun. 
Lett., (submitted) Available on-line at arXiv:2501.05657. +[6] Z. Wang, C. Ouyang, X. Mu, Y. Liu, and Z. Ding, "Modeling and beamforming optimization for pinching-antenna systems," IEEE Trans. Wireless Commun., (submitted) Available on-line at arXiv:2502.05917. +[7] Y. Xu, Z. Ding, and G. Karagiannidis, "Rate maximization for downlink pinching-antenna systems," IEEE Commun. Lett., (to appear in 2025) Available on-line at arXiv:2502.12629. +[8] X. Mu, G. Zhu, and Y. Liu, "Pinching-antenna system (PASS)-enabled multicast communications," IEEE Trans. Commun., (submitted) Available on-line at arXiv:2502.16624. +[9] J. Xiao, J. Wang, and Y. Liu, "Channel estimation for pinching-antenna systems (PASS)," IEEE Trans. Commun., (submitted) Available on-line at arXiv:2503.13268. +[10] ——, “Beam training for pinching-antenna systems (PASS),” IEEE Trans. Wireless Commun., (submitted) Available on-line at arXiv:2502.05921. +[11] X. Xie, Y. Lu, and Z. Ding, "Graph neural network enabled pinching antennas," IEEE Commun. Lett., (submitted) Available on-line at arXiv:2502.05447. +[12] J. Guo, Y. Liu, and A. Nallanathan, "GPASS: Deep learning for beamforming in pinching-antenna systems (PASS)," IEEE Commun. Lett., (submitted) Available on-line at arXiv:2502.01438. +[13] S. A. Tegos, P. D. Diamantoulakis, Z. Ding, and G. K. Karagiannidis, "Minimum data rate maximization for uplink pinching-antenna systems," IEEE Wireless Commun. Lett., (to appear in 2025) Available on-line at arXiv:2412.13892. +[14] M. Sun, C. Ouyang, S. Wu, and Y. Liu, "Physical layer security for pinching-antenna systems (PASS)," IEEE Commun. Lett., (submitted) Available on-line at arXiv:2503.09075. +[15] Y. Qin, Y. Fu, and H. Zhang, "Joint antenna position and transmit power optimization for pinching antenna-assisted ISAC systems," IEEE Commun. Lett., (submitted) Available on-line at arXiv:2503.12872. +[16] F. Liu, Y. Cui, C. Masouros, J. Xu, T. X. Han, Y. C. Eldar, and S. 
Buzzi, "Integrated sensing and communications: Toward dual-functional wireless networks for 6G and beyond," IEEE J. Sel. Areas Commun., vol. 40, no. 6, pp. 1728-1767, 2022. +[17] T. Jia and R. M. Buehrer, “A new cramer-rao lower bound for TOA-based localization,” in Proc. Military Commun. Conf. (MILCOM 2008), Nov. 2008, pp. 1-5. \ No newline at end of file diff --git a/data/2025/2504_05xxx/2504.05792/images/01ea50d42286274380a45111164366e7d2a8affde9e260718f721bf2d384510e.jpg b/data/2025/2504_05xxx/2504.05792/images/01ea50d42286274380a45111164366e7d2a8affde9e260718f721bf2d384510e.jpg new file mode 100644 index 0000000000000000000000000000000000000000..fe7e80bcf9a4530e7279376e51d69a89cdab016b --- /dev/null +++ b/data/2025/2504_05xxx/2504.05792/images/01ea50d42286274380a45111164366e7d2a8affde9e260718f721bf2d384510e.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f945eaaacfc3d649fdd2d89dde6d017f179ffa38937b2da124bff7f60a90ed85 +size 14418 diff --git a/data/2025/2504_05xxx/2504.05792/images/0b193879da9018fe9108b7b97eb52ae05825c2c4ca59ee37a9665be50f9dd931.jpg b/data/2025/2504_05xxx/2504.05792/images/0b193879da9018fe9108b7b97eb52ae05825c2c4ca59ee37a9665be50f9dd931.jpg new file mode 100644 index 0000000000000000000000000000000000000000..ddd4e77e12263fecdfac015da698fa1dac655311 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05792/images/0b193879da9018fe9108b7b97eb52ae05825c2c4ca59ee37a9665be50f9dd931.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:506dcf2c6f94d528f29d08e1e6f93d4b386e8d03f60053ccc485e4e238fe371c +size 10902 diff --git a/data/2025/2504_05xxx/2504.05792/images/14299fc635244dedb6a0531d4d4b05a4d9af566abd191463f2ee7da4fce95e15.jpg b/data/2025/2504_05xxx/2504.05792/images/14299fc635244dedb6a0531d4d4b05a4d9af566abd191463f2ee7da4fce95e15.jpg new file mode 100644 index 0000000000000000000000000000000000000000..ad1bd7ce2784d4fbb8e199f059d21ec9fb85f290 --- /dev/null +++ 
b/data/2025/2504_05xxx/2504.05792/images/14299fc635244dedb6a0531d4d4b05a4d9af566abd191463f2ee7da4fce95e15.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:cd58dd95799da7da75bf34a770ccb5802ecc145d3a551e33b11ece0f8238b657 +size 6532 diff --git a/data/2025/2504_05xxx/2504.05792/images/1471639b0f119a70aea449f2c23ef35cea4d5de252869d12349b3c54fafbf1c3.jpg b/data/2025/2504_05xxx/2504.05792/images/1471639b0f119a70aea449f2c23ef35cea4d5de252869d12349b3c54fafbf1c3.jpg new file mode 100644 index 0000000000000000000000000000000000000000..b218eb6cdbcb218b6a78dbb2f66bde3c01662a20 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05792/images/1471639b0f119a70aea449f2c23ef35cea4d5de252869d12349b3c54fafbf1c3.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:aa1f32b41b1f75fd0b2acea2df906a18b45f3a4421e806f3807b7b42e2bc3495 +size 13370 diff --git a/data/2025/2504_05xxx/2504.05792/images/151d1b9a99db6dd96408c9f3ba705c7a8bdad63c519ff3859798eac266f15097.jpg b/data/2025/2504_05xxx/2504.05792/images/151d1b9a99db6dd96408c9f3ba705c7a8bdad63c519ff3859798eac266f15097.jpg new file mode 100644 index 0000000000000000000000000000000000000000..28d8fbb148ae1aa15b067332d935880132f65235 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05792/images/151d1b9a99db6dd96408c9f3ba705c7a8bdad63c519ff3859798eac266f15097.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a9517c0e708f2de3ece914d741afe99f273780747722cf4a0f6816ce6e37e239 +size 19766 diff --git a/data/2025/2504_05xxx/2504.05792/images/1dccd3cab821d5c1eaf6b5880ef6b79971c23a5b46324e9c72aad3ff00c1e95a.jpg b/data/2025/2504_05xxx/2504.05792/images/1dccd3cab821d5c1eaf6b5880ef6b79971c23a5b46324e9c72aad3ff00c1e95a.jpg new file mode 100644 index 0000000000000000000000000000000000000000..173954e0034709559f6d187be4bd3eda2c945f1d --- /dev/null +++ b/data/2025/2504_05xxx/2504.05792/images/1dccd3cab821d5c1eaf6b5880ef6b79971c23a5b46324e9c72aad3ff00c1e95a.jpg @@ -0,0 +1,3 @@ +version 
https://git-lfs.github.com/spec/v1 +oid sha256:f4da7937bcd846bba6d0299e15ea40c5157ce4fbd93380d215b60cdb298d3062 +size 8823 diff --git a/data/2025/2504_05xxx/2504.05792/images/214063bb72dbd9a11d4eb4e79e473fdc96c3ca35a008baf61c81991492ed251a.jpg b/data/2025/2504_05xxx/2504.05792/images/214063bb72dbd9a11d4eb4e79e473fdc96c3ca35a008baf61c81991492ed251a.jpg new file mode 100644 index 0000000000000000000000000000000000000000..1c7942af42090f93741076c48966b759babce923 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05792/images/214063bb72dbd9a11d4eb4e79e473fdc96c3ca35a008baf61c81991492ed251a.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ec0cd4e6f01708c91928ed2aa7a6e23d4887241426f1243924098f7ec74904bc +size 13813 diff --git a/data/2025/2504_05xxx/2504.05792/images/247c87c8019459213bfe0ce6435a498ac3d201e36b5445abfef7c87b74001d6d.jpg b/data/2025/2504_05xxx/2504.05792/images/247c87c8019459213bfe0ce6435a498ac3d201e36b5445abfef7c87b74001d6d.jpg new file mode 100644 index 0000000000000000000000000000000000000000..8e682765d3fc62044f24d564688746ea6dd78bd7 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05792/images/247c87c8019459213bfe0ce6435a498ac3d201e36b5445abfef7c87b74001d6d.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8b5050c05d3d6fffa073256eaba17454d4132539cbe09a2a37edd14a9644ec06 +size 21318 diff --git a/data/2025/2504_05xxx/2504.05792/images/37093a2ca762a3380380c7f8cf3e0bb02c3ef35903771b9c7dd9cbf571d85434.jpg b/data/2025/2504_05xxx/2504.05792/images/37093a2ca762a3380380c7f8cf3e0bb02c3ef35903771b9c7dd9cbf571d85434.jpg new file mode 100644 index 0000000000000000000000000000000000000000..80a42ad62bc9a8b75b047506c01273f76415b780 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05792/images/37093a2ca762a3380380c7f8cf3e0bb02c3ef35903771b9c7dd9cbf571d85434.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1d5152a27e2ff50247823e7a806ec3cef71cf02f0ebc1898b8606b306cb0455b +size 9266 diff --git 
a/data/2025/2504_05xxx/2504.05792/images/44402e915e3a77623794fffb1854bee176c10134d19af786ec61562a51c00f68.jpg b/data/2025/2504_05xxx/2504.05792/images/44402e915e3a77623794fffb1854bee176c10134d19af786ec61562a51c00f68.jpg new file mode 100644 index 0000000000000000000000000000000000000000..e2d7bfaae31c833f299e55b49f5a9d0b471483df --- /dev/null +++ b/data/2025/2504_05xxx/2504.05792/images/44402e915e3a77623794fffb1854bee176c10134d19af786ec61562a51c00f68.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:251b780fd8d5d7aad1a29cadb396fadcf6daab6388d5114991ea9d946afdeadd +size 6612 diff --git a/data/2025/2504_05xxx/2504.05792/images/486c8e561be8deccf96b5bf141f51114caaddd2b6a1e8ef24fb805f7283bf4b4.jpg b/data/2025/2504_05xxx/2504.05792/images/486c8e561be8deccf96b5bf141f51114caaddd2b6a1e8ef24fb805f7283bf4b4.jpg new file mode 100644 index 0000000000000000000000000000000000000000..3741f7876e262b7dad40bc1d1b8c7cbd632fe3e0 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05792/images/486c8e561be8deccf96b5bf141f51114caaddd2b6a1e8ef24fb805f7283bf4b4.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7f915d279b1546ee33ca8c80f973c585d484f619d9c49e07d4a59623539ec90c +size 7456 diff --git a/data/2025/2504_05xxx/2504.05792/images/547d79cbc78526a73c650c0b1ea306da82b683b588de1e662784af6b3e9448c8.jpg b/data/2025/2504_05xxx/2504.05792/images/547d79cbc78526a73c650c0b1ea306da82b683b588de1e662784af6b3e9448c8.jpg new file mode 100644 index 0000000000000000000000000000000000000000..a4a09e180d41299e6d0dfc1cafc9f8849ba5cc89 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05792/images/547d79cbc78526a73c650c0b1ea306da82b683b588de1e662784af6b3e9448c8.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8499929325f913ff86662ced6ad81634077db887126b7b1e9a960cc38151434b +size 8093 diff --git a/data/2025/2504_05xxx/2504.05792/images/5959a4483a3d708e1fda07cb15e0ba8ae7a2653fca446613aace2299d7205a6c.jpg 
b/data/2025/2504_05xxx/2504.05792/images/5959a4483a3d708e1fda07cb15e0ba8ae7a2653fca446613aace2299d7205a6c.jpg new file mode 100644 index 0000000000000000000000000000000000000000..52021f5c39f933a25c44ccf89d948718ec7c87b0 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05792/images/5959a4483a3d708e1fda07cb15e0ba8ae7a2653fca446613aace2299d7205a6c.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:57e50f83fbea961e0687a1cc914e423f944cc2da492269cb25ac82ff04004161 +size 13788 diff --git a/data/2025/2504_05xxx/2504.05792/images/5a1798459fbd0aa32c927f7230e28a18dc94758ab47ca58020471c74159cf48b.jpg b/data/2025/2504_05xxx/2504.05792/images/5a1798459fbd0aa32c927f7230e28a18dc94758ab47ca58020471c74159cf48b.jpg new file mode 100644 index 0000000000000000000000000000000000000000..fc520513075a04e54af908bce8d5a20b86aafcff --- /dev/null +++ b/data/2025/2504_05xxx/2504.05792/images/5a1798459fbd0aa32c927f7230e28a18dc94758ab47ca58020471c74159cf48b.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a126e3df359a2882693973352e868765cd4b35c0316e823bc66d3b610b4de60b +size 10791 diff --git a/data/2025/2504_05xxx/2504.05792/images/5a6335b3a9289fd503a22efb880ca732737dbd0013361107512aafb8a6d63d92.jpg b/data/2025/2504_05xxx/2504.05792/images/5a6335b3a9289fd503a22efb880ca732737dbd0013361107512aafb8a6d63d92.jpg new file mode 100644 index 0000000000000000000000000000000000000000..9c54aa1c7ad8ca40767c7afc4200b11692bebf79 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05792/images/5a6335b3a9289fd503a22efb880ca732737dbd0013361107512aafb8a6d63d92.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6effc7410bfc411bac804c66db9a80dcf852baaf4aaf7b0867e64b7e0932c6e8 +size 15647 diff --git a/data/2025/2504_05xxx/2504.05792/images/70a9e0c74d1e98b64a55490fd48b8e41bcf0c6a40007388bf03a0e841d221535.jpg b/data/2025/2504_05xxx/2504.05792/images/70a9e0c74d1e98b64a55490fd48b8e41bcf0c6a40007388bf03a0e841d221535.jpg new file mode 100644 index 
0000000000000000000000000000000000000000..bac22c49e77dd3eabd75f975971b755a1567f76e --- /dev/null +++ b/data/2025/2504_05xxx/2504.05792/images/70a9e0c74d1e98b64a55490fd48b8e41bcf0c6a40007388bf03a0e841d221535.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:18304266ce926efbc3fb250d69c08fa42b0cf804d248fbf3dfdad440a8612fce +size 6562 diff --git a/data/2025/2504_05xxx/2504.05792/images/746490cd069ab9d9602ce83c59d0e2cbee3542393c6445337af101da905659a7.jpg b/data/2025/2504_05xxx/2504.05792/images/746490cd069ab9d9602ce83c59d0e2cbee3542393c6445337af101da905659a7.jpg new file mode 100644 index 0000000000000000000000000000000000000000..bb0db09fc5f4ce308727562b0bfb4a18f6fab2d1 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05792/images/746490cd069ab9d9602ce83c59d0e2cbee3542393c6445337af101da905659a7.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6a3e7f54ee6b06e3b50e79d36856b9356100c9524e5badc98fe4e788422ba232 +size 8214 diff --git a/data/2025/2504_05xxx/2504.05792/images/7957cdc5bc4aca57f3b61f03118c9f6b7913f87249d6ba6442b9ac408f465fd0.jpg b/data/2025/2504_05xxx/2504.05792/images/7957cdc5bc4aca57f3b61f03118c9f6b7913f87249d6ba6442b9ac408f465fd0.jpg new file mode 100644 index 0000000000000000000000000000000000000000..3f452ab39f93f5c59d9fc353fd185c2dfafd3dae --- /dev/null +++ b/data/2025/2504_05xxx/2504.05792/images/7957cdc5bc4aca57f3b61f03118c9f6b7913f87249d6ba6442b9ac408f465fd0.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5b157ccc152b814ec7a547f04bcb4cdbbf06227984785dd35b9a1a4cef602c71 +size 7193 diff --git a/data/2025/2504_05xxx/2504.05792/images/7ad71bfd1373f294d6e00d260fdb51171b289ae442cf09cc66913fd30f1da686.jpg b/data/2025/2504_05xxx/2504.05792/images/7ad71bfd1373f294d6e00d260fdb51171b289ae442cf09cc66913fd30f1da686.jpg new file mode 100644 index 0000000000000000000000000000000000000000..9e5640cb58ba4ba2fcbfebe5aa5ea05d2e2e0aea --- /dev/null +++ 
b/data/2025/2504_05xxx/2504.05792/images/7ad71bfd1373f294d6e00d260fdb51171b289ae442cf09cc66913fd30f1da686.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b408777858fddded7e145da22e806843564fce4ccdb891f10fd52ed75e387484 +size 2955 diff --git a/data/2025/2504_05xxx/2504.05792/images/809d2cd5feaaa0dd5d06c213f31f3beb69e233a29002300ab0744dc226b58530.jpg b/data/2025/2504_05xxx/2504.05792/images/809d2cd5feaaa0dd5d06c213f31f3beb69e233a29002300ab0744dc226b58530.jpg new file mode 100644 index 0000000000000000000000000000000000000000..ead339d803f5db68957ec8cecd03b06ccc97e418 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05792/images/809d2cd5feaaa0dd5d06c213f31f3beb69e233a29002300ab0744dc226b58530.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1443c728df1f46be0805a3b95dbb854d0bc593a7d3b35f18405701000d8f34fc +size 11904 diff --git a/data/2025/2504_05xxx/2504.05792/images/8a4c9d534d1ff08f2e240c8532c680172fe6f3b55af457cc99aeec8c9ebd07f9.jpg b/data/2025/2504_05xxx/2504.05792/images/8a4c9d534d1ff08f2e240c8532c680172fe6f3b55af457cc99aeec8c9ebd07f9.jpg new file mode 100644 index 0000000000000000000000000000000000000000..65026b32c52049364fd0d97961cd47ebafa38a9f --- /dev/null +++ b/data/2025/2504_05xxx/2504.05792/images/8a4c9d534d1ff08f2e240c8532c680172fe6f3b55af457cc99aeec8c9ebd07f9.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9528eacd680fbcab5791c0d701bbeb88d1a4f9f7f064846b05f2de642fe0723a +size 19963 diff --git a/data/2025/2504_05xxx/2504.05792/images/8c86d7c82be13cfdb700185fedbcfc043903bcacc188a31c96bd78c0d022b95d.jpg b/data/2025/2504_05xxx/2504.05792/images/8c86d7c82be13cfdb700185fedbcfc043903bcacc188a31c96bd78c0d022b95d.jpg new file mode 100644 index 0000000000000000000000000000000000000000..1724f26cd5a0f491abd47ea9acd2c22d09a55cc5 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05792/images/8c86d7c82be13cfdb700185fedbcfc043903bcacc188a31c96bd78c0d022b95d.jpg @@ -0,0 +1,3 @@ +version 
https://git-lfs.github.com/spec/v1 +oid sha256:cd4f01908876e5b2e88c21265ba00eaf1956cd5d43d56d2277e584f721a66d91 +size 15062 diff --git a/data/2025/2504_05xxx/2504.05792/images/95e52804193e9fcc17868d063866468da9677490c21f4494ad8893fb6d18da17.jpg b/data/2025/2504_05xxx/2504.05792/images/95e52804193e9fcc17868d063866468da9677490c21f4494ad8893fb6d18da17.jpg new file mode 100644 index 0000000000000000000000000000000000000000..85f0b1b86ac65804d4cbb798e040513b14b04774 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05792/images/95e52804193e9fcc17868d063866468da9677490c21f4494ad8893fb6d18da17.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:56e6264cc7034b1740d8520f6411e5e3669e94ba1cbbd50b7a3f904cfce13f08 +size 19272 diff --git a/data/2025/2504_05xxx/2504.05792/images/966213d6c77ba0bd668393a5c102d36667fbb01f2944f19727b0f886aacdb2d4.jpg b/data/2025/2504_05xxx/2504.05792/images/966213d6c77ba0bd668393a5c102d36667fbb01f2944f19727b0f886aacdb2d4.jpg new file mode 100644 index 0000000000000000000000000000000000000000..7c47fe5501f514bf52fe8e5b03137443730f0823 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05792/images/966213d6c77ba0bd668393a5c102d36667fbb01f2944f19727b0f886aacdb2d4.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8b33129974040490f27d1d1721b984de76916cbdd706256c717284d5b3332f3d +size 22519 diff --git a/data/2025/2504_05xxx/2504.05792/images/9c2b4a794f70a8d57f2ebe42f9fb43fa9645319cfa28d3e7c92d4d5f0038df56.jpg b/data/2025/2504_05xxx/2504.05792/images/9c2b4a794f70a8d57f2ebe42f9fb43fa9645319cfa28d3e7c92d4d5f0038df56.jpg new file mode 100644 index 0000000000000000000000000000000000000000..f2152e65df2d84b91e7a30bbbf783ab6cf7d7515 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05792/images/9c2b4a794f70a8d57f2ebe42f9fb43fa9645319cfa28d3e7c92d4d5f0038df56.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5f19191592a617a5b00665944c4b602ff201244019d49c76252b98560f8c3068 +size 8386 diff --git 
a/data/2025/2504_05xxx/2504.05792/images/b5909af4d94bbfcfd073d85cb6beeafb95d51b8b085babdd36c019a606df5110.jpg b/data/2025/2504_05xxx/2504.05792/images/b5909af4d94bbfcfd073d85cb6beeafb95d51b8b085babdd36c019a606df5110.jpg new file mode 100644 index 0000000000000000000000000000000000000000..1b484f519b688037cd14e0309c763c37773e876c --- /dev/null +++ b/data/2025/2504_05xxx/2504.05792/images/b5909af4d94bbfcfd073d85cb6beeafb95d51b8b085babdd36c019a606df5110.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:bc0f34e8bbbad604b238b309044c8b7c53a638626578beb6498eab9f123130eb +size 22504 diff --git a/data/2025/2504_05xxx/2504.05792/images/b5c18237860340138586e9dc7f54cb91dbb12c3aa6edb0d714545f8f944c6b5c.jpg b/data/2025/2504_05xxx/2504.05792/images/b5c18237860340138586e9dc7f54cb91dbb12c3aa6edb0d714545f8f944c6b5c.jpg new file mode 100644 index 0000000000000000000000000000000000000000..8a382d364ec12a9590e1ca676e7b6055e67e76a9 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05792/images/b5c18237860340138586e9dc7f54cb91dbb12c3aa6edb0d714545f8f944c6b5c.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5daa9f0de7bb776eaac2b0b6c533097dbf65196840e7c2659454fa24fb0d9a8a +size 8767 diff --git a/data/2025/2504_05xxx/2504.05792/images/bb13085ae2c5a7cb4bdb6ceee275365f3936f065611f1ca97fcd535b83dce0d7.jpg b/data/2025/2504_05xxx/2504.05792/images/bb13085ae2c5a7cb4bdb6ceee275365f3936f065611f1ca97fcd535b83dce0d7.jpg new file mode 100644 index 0000000000000000000000000000000000000000..5932c23fcadc7af596163296e3b6f0510e9ed45e --- /dev/null +++ b/data/2025/2504_05xxx/2504.05792/images/bb13085ae2c5a7cb4bdb6ceee275365f3936f065611f1ca97fcd535b83dce0d7.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:3e481410c5200a3e011b7f71e1601d3a63bc4d244eac93ab306889c9eb14f174 +size 34069 diff --git a/data/2025/2504_05xxx/2504.05792/images/c075e3bc68e2e5ee15dc6116b2ff2a6810ae8aadf71013e8d8de860a1ed1c4a4.jpg 
b/data/2025/2504_05xxx/2504.05792/images/c075e3bc68e2e5ee15dc6116b2ff2a6810ae8aadf71013e8d8de860a1ed1c4a4.jpg new file mode 100644 index 0000000000000000000000000000000000000000..cc9e637a598e0b3f37751b15d5a0f6a3b6c261c1 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05792/images/c075e3bc68e2e5ee15dc6116b2ff2a6810ae8aadf71013e8d8de860a1ed1c4a4.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:27b614fdc7aa4827ce99540136e9d229ad432c805431165a8973f975aecfed1d +size 2629 diff --git a/data/2025/2504_05xxx/2504.05792/images/c09adac5029f3bb246ac669f5737eb95ab3d1703a013baaefd01eeba05fdbb53.jpg b/data/2025/2504_05xxx/2504.05792/images/c09adac5029f3bb246ac669f5737eb95ab3d1703a013baaefd01eeba05fdbb53.jpg new file mode 100644 index 0000000000000000000000000000000000000000..8a286df784680d1f5a3e8ca4935307b1808952fd --- /dev/null +++ b/data/2025/2504_05xxx/2504.05792/images/c09adac5029f3bb246ac669f5737eb95ab3d1703a013baaefd01eeba05fdbb53.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5d2f9779b56c77cdb58d06f7610dc2807d456a6c13911abc3e9fff7caf0b78bf +size 9038 diff --git a/data/2025/2504_05xxx/2504.05792/images/de2fd461e68d196df1294074a08a6e8f1ae03b5be4657fc9a3ee1fe1bcd21315.jpg b/data/2025/2504_05xxx/2504.05792/images/de2fd461e68d196df1294074a08a6e8f1ae03b5be4657fc9a3ee1fe1bcd21315.jpg new file mode 100644 index 0000000000000000000000000000000000000000..795acf3db8e3fc5c4275b6cce9476c9682382284 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05792/images/de2fd461e68d196df1294074a08a6e8f1ae03b5be4657fc9a3ee1fe1bcd21315.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:33b07700a403f81a61e5de1f8e5040e999186174773b1def13b7b5f75730a361 +size 26517 diff --git a/data/2025/2504_05xxx/2504.05792/images/e38af74bd0ebaec881bdde2aca1a48ff6d2810acd55316bfa5058f251dae72d6.jpg b/data/2025/2504_05xxx/2504.05792/images/e38af74bd0ebaec881bdde2aca1a48ff6d2810acd55316bfa5058f251dae72d6.jpg new file mode 100644 index 
0000000000000000000000000000000000000000..4bb6c57775066085e1e9f008b1bfc84f59a34eb9 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05792/images/e38af74bd0ebaec881bdde2aca1a48ff6d2810acd55316bfa5058f251dae72d6.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:bcee069c68ad9bba3e8e94cf6732cca36c173fa5f148b1ee8dcd5ca36cdb4b7d +size 9419 diff --git a/data/2025/2504_05xxx/2504.05792/images/e87b58de530b765bf6e415c59ae2d58cb9d534917313e5b61bd88443bf0d9437.jpg b/data/2025/2504_05xxx/2504.05792/images/e87b58de530b765bf6e415c59ae2d58cb9d534917313e5b61bd88443bf0d9437.jpg new file mode 100644 index 0000000000000000000000000000000000000000..eee556fa2037155a26efbce8320acf6be9936334 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05792/images/e87b58de530b765bf6e415c59ae2d58cb9d534917313e5b61bd88443bf0d9437.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:48e7b4bf3ad61a630597fa127d8008cc3411b3c0e0ca1bf6dd2fcdc78f41b882 +size 9728 diff --git a/data/2025/2504_05xxx/2504.05792/layout.json b/data/2025/2504_05xxx/2504.05792/layout.json new file mode 100644 index 0000000000000000000000000000000000000000..c91d784fbc37fb804432ca1b02ef2c25736dc879 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05792/layout.json @@ -0,0 +1,7601 @@ +{ + "pdf_info": [ + { + "para_blocks": [ + { + "bbox": [ + 76, + 33, + 534, + 55 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 76, + 33, + 534, + 55 + ], + "spans": [ + { + "bbox": [ + 76, + 33, + 534, + 55 + ], + "type": "text", + "content": "Pinching-Antenna Assisted ISAC: A CRLB Perspective" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 239, + 63, + 367, + 76 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 239, + 63, + 367, + 76 + ], + "spans": [ + { + "bbox": [ + 239, + 63, + 367, + 76 + ], + "type": "text", + "content": "Zhiguo Ding, Fellow, IEEE" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 45, + 91, + 301, + 223 + ], + "type": "text", + "angle": 0, + 
"lines": [ + { + "bbox": [ + 45, + 91, + 301, + 223 + ], + "spans": [ + { + "bbox": [ + 45, + 91, + 301, + 223 + ], + "type": "text", + "content": "Abstract—Recently, pinching antennas have attracted significant research interest due to their capability to reconfigure wireless channels as well as their array configuration flexibility. This letter focuses on how these features can be used to support integrated sensing and communications (ISAC) from the Cramér-Rao lower bound (CRLB) perspective. In particular, the CRLB achieved by pinching antennas is first derived and then compared to that of conventional antennas. The presented analytical and simulation results demonstrate that using pinching antennas can significantly reduce CRLB and, hence, enhance positioning accuracy. In addition, this letter also reveals that the low-cost and reconfigurability features of pinching antennas can be utilized to realize flexible user-centric positioning." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 45, + 224, + 301, + 255 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 224, + 301, + 255 + ], + "spans": [ + { + "bbox": [ + 45, + 224, + 301, + 255 + ], + "type": "text", + "content": "Index Terms—Pinching antennas, integrated sensing and communications (ISAC), Cramér-Rao lower bound (CRLB), estimation theory." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 132, + 258, + 214, + 269 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 258, + 214, + 269 + ], + "spans": [ + { + "bbox": [ + 132, + 258, + 214, + 269 + ], + "type": "text", + "content": "I. 
INTRODUCTION" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 45, + 274, + 301, + 429 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 274, + 301, + 429 + ], + "spans": [ + { + "bbox": [ + 45, + 274, + 301, + 429 + ], + "type": "text", + "content": "Recently, pinching antennas have received significant attention from both academia and industry as a novel evolution of smart antennas, and offer three distinguished features [1], [2]. One is their capability to create strong line-of-sight (LoS) links between the transceivers, which means that large-scale path losses and LoS blockage can be effectively mitigated by activating antennas close to users [3]. The second feature is the reconfigurability of pinching-antenna systems, where the topology of a pinching-antenna array, e.g., the locations and the number of pinching antennas, can be flexibly adjusted. The third feature is their practicality, where DOCOMO's prototype shows that pinching antennas can be straightforwardly implemented in a low-cost manner [1]." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 45, + 430, + 301, + 669 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 430, + 301, + 669 + ], + "spans": [ + { + "bbox": [ + 45, + 430, + 301, + 669 + ], + "type": "text", + "content": "In the literature, there already exists a large amount of work to demonstrate that the use of pinching antennas can significantly enhance the communication functionality of wireless networks. For example, the fundamental issues of pinching antennas, such as antenna activation, the architecture of a pinching-antenna array, and the array gains, have been investigated in [4]–[6]. Antenna placement is key to realizing the full potential of pinching-antenna systems, where various designs and their impact on the system throughput have been investigated in [7], [8]. 
Channel estimation and beam training are crucial issues to pinching-antenna systems, and sophisticated designs using the flexibility features of pinching antennas have been developed in [9], [10]. For many resource allocation problems encountered in pinching-antenna systems, the use of conventional convex optimization leads to high computational complexity, which motivates the application of advanced learning methods [11], [12]. The applications of pinching antennas to improve the uplink throughput and the security of communication networks have also been recently investigated in [13], [14]." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 45, + 670, + 301, + 718 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 670, + 301, + 718 + ], + "spans": [ + { + "bbox": [ + 45, + 670, + 301, + 718 + ], + "type": "text", + "content": "However, we note that the impact of pinching antennas on the sensing functionality of wireless networks has not yet been fully characterized in the literature, although the recent work in [15] demonstrated the importance of pinching" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 45, + 719, + 301, + 738 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 719, + 301, + 738 + ], + "spans": [ + { + "bbox": [ + 45, + 719, + 301, + 738 + ], + "type": "text", + "content": "Z. Ding is with the University of Manchester, Manchester, M1 9BB, UK, and Khalifa University, Abu Dhabi, UAE." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 307, + 91, + 566, + 295 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 91, + 566, + 295 + ], + "spans": [ + { + "bbox": [ + 307, + 91, + 566, + 295 + ], + "type": "text", + "content": "antennas in integrated sensing and communication (ISAC) systems [16], which motivates this letter. 
In particular, in this letter, the Cramér-Rao lower bound (CRLB) is used as the performance metric to characterize the capability of pinching antennas for enhancing the positioning accuracy of ISAC networks. The CRLB achieved by pinching antennas is first derived in the letter, and then compared to conventional antennas. The presented analytical results reveal that the use of pinching antennas can ensure that users at different locations experience uniform positioning accuracy, whereas the use of conventional antennas can result in a significant disparity in accuracy among the users. In addition, the important properties of CRLB achieved by pinching antennas, such as the effects of antenna placement and the local maximums of CRLB, are also investigated. Furthermore, this letter also reveals that the low-cost and reconfigurability features of pinching antennas can be utilized to realize flexible user-centric positioning." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 392, + 308, + 481, + 319 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 392, + 308, + 481, + 319 + ], + "spans": [ + { + "bbox": [ + 392, + 308, + 481, + 319 + ], + "type": "text", + "content": "II. SYSTEM MODEL" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 307, + 327, + 564, + 447 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 327, + 564, + 447 + ], + "spans": [ + { + "bbox": [ + 307, + 327, + 564, + 447 + ], + "type": "text", + "content": "Consider a pinching-antenna system that is deployed to provide ISAC services to " + }, + { + "bbox": [ + 307, + 327, + 564, + 447 + ], + "type": "inline_equation", + "content": "M" + }, + { + "bbox": [ + 307, + 327, + 564, + 447 + ], + "type": "text", + "content": " single-antenna users, denoted by " + }, + { + "bbox": [ + 307, + 327, + 564, + 447 + ], + "type": "inline_equation", + "content": "\\mathrm{U}_m" + }, + { + "bbox": [ + 307, + 327, + 564, + 447 + ], + "type": "text", + "content": ". 
Given the fact that there is already a rich literature on using pinching antennas to enhance communications, and also due to space limitations, the impact of pinching antennas on the sensing functionality is focused on in this letter. Without loss of generality, assume that " + }, + { + "bbox": [ + 307, + 327, + 564, + 447 + ], + "type": "inline_equation", + "content": "N" + }, + { + "bbox": [ + 307, + 327, + 564, + 447 + ], + "type": "text", + "content": " pinching antennas are activated on " + }, + { + "bbox": [ + 307, + 327, + 564, + 447 + ], + "type": "inline_equation", + "content": "N_{\\mathrm{WG}}" + }, + { + "bbox": [ + 307, + 327, + 564, + 447 + ], + "type": "text", + "content": " waveguides. The location of the " + }, + { + "bbox": [ + 307, + 327, + 564, + 447 + ], + "type": "inline_equation", + "content": "n" + }, + { + "bbox": [ + 307, + 327, + 564, + 447 + ], + "type": "text", + "content": "-th pinching antenna is denoted by " + }, + { + "bbox": [ + 307, + 327, + 564, + 447 + ], + "type": "inline_equation", + "content": "\\psi_n^{\\mathrm{Pin}} = (x_n^{\\mathrm{Pin}}, y_n^{\\mathrm{Pin}}, d_{\\mathrm{H}})" + }, + { + "bbox": [ + 307, + 327, + 564, + 447 + ], + "type": "text", + "content": ", where " + }, + { + "bbox": [ + 307, + 327, + 564, + 447 + ], + "type": "inline_equation", + "content": "d_{\\mathrm{H}}" + }, + { + "bbox": [ + 307, + 327, + 564, + 447 + ], + "type": "text", + "content": " denotes the height of the waveguides." 
+ } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 308, + 448, + 564, + 508 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 448, + 564, + 508 + ], + "spans": [ + { + "bbox": [ + 308, + 448, + 564, + 508 + ], + "type": "text", + "content": "The service area is denoted by " + }, + { + "bbox": [ + 308, + 448, + 564, + 508 + ], + "type": "inline_equation", + "content": "\\mathcal{A}" + }, + { + "bbox": [ + 308, + 448, + 564, + 508 + ], + "type": "text", + "content": " and is assumed to be a rectangle with its two sides denoted by " + }, + { + "bbox": [ + 308, + 448, + 564, + 508 + ], + "type": "inline_equation", + "content": "D_{\\mathrm{W}}" + }, + { + "bbox": [ + 308, + 448, + 564, + 508 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 308, + 448, + 564, + 508 + ], + "type": "inline_equation", + "content": "D_{\\mathrm{L}}" + }, + { + "bbox": [ + 308, + 448, + 564, + 508 + ], + "type": "text", + "content": ", respectively, and its center located at " + }, + { + "bbox": [ + 308, + 448, + 564, + 508 + ], + "type": "inline_equation", + "content": "(0,0,0)" + }, + { + "bbox": [ + 308, + 448, + 564, + 508 + ], + "type": "text", + "content": ". The users are assumed to be uniformly distributed in " + }, + { + "bbox": [ + 308, + 448, + 564, + 508 + ], + "type": "inline_equation", + "content": "\\mathcal{A}" + }, + { + "bbox": [ + 308, + 448, + 564, + 508 + ], + "type": "text", + "content": ", and " + }, + { + "bbox": [ + 308, + 448, + 564, + 508 + ], + "type": "inline_equation", + "content": "\\mathrm{U}_m" + }, + { + "bbox": [ + 308, + 448, + 564, + 508 + ], + "type": "text", + "content": "'s location is denoted by " + }, + { + "bbox": [ + 308, + 448, + 564, + 508 + ], + "type": "inline_equation", + "content": "\\psi_m = (x_m,y_m,0)" + }, + { + "bbox": [ + 308, + 448, + 564, + 508 + ], + "type": "text", + "content": "." 
+ } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 308, + 508, + 564, + 544 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 508, + 564, + 544 + ], + "spans": [ + { + "bbox": [ + 308, + 508, + 564, + 544 + ], + "type": "text", + "content": "Denote the distance from the " + }, + { + "bbox": [ + 308, + 508, + 564, + 544 + ], + "type": "inline_equation", + "content": "n" + }, + { + "bbox": [ + 308, + 508, + 564, + 544 + ], + "type": "text", + "content": "-th pinching antenna to the " + }, + { + "bbox": [ + 308, + 508, + 564, + 544 + ], + "type": "inline_equation", + "content": "m" + }, + { + "bbox": [ + 308, + 508, + 564, + 544 + ], + "type": "text", + "content": "-th user by " + }, + { + "bbox": [ + 308, + 508, + 564, + 544 + ], + "type": "inline_equation", + "content": "d_{mn}" + }, + { + "bbox": [ + 308, + 508, + 564, + 544 + ], + "type": "text", + "content": ". Distance (range) estimates for the " + }, + { + "bbox": [ + 308, + 508, + 564, + 544 + ], + "type": "inline_equation", + "content": "m" + }, + { + "bbox": [ + 308, + 508, + 564, + 544 + ], + "type": "text", + "content": "-th user can be modeled as follows: [17]" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 392, + 552, + 563, + 566 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 392, + 552, + 563, + 566 + ], + "spans": [ + { + "bbox": [ + 392, + 552, + 563, + 566 + ], + "type": "interline_equation", + "content": "\\hat {d} _ {m n} = d _ {m n} + w _ {m n}, \\tag {1}", + "image_path": "7ad71bfd1373f294d6e00d260fdb51171b289ae442cf09cc66913fd30f1da686.jpg" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 308, + 576, + 564, + 617 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 576, + 564, + 617 + ], + "spans": [ + { + "bbox": [ + 308, + 576, + 564, + 617 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 308, + 576, + 564, + 617 + ], + "type": "inline_equation", + "content": "d_{mn} = 
\\sqrt{(x_m - x_n^{\\mathrm{Pin}})^2 + (y_m - y_n^{\\mathrm{Pin}})^2 + d_{\\mathrm{H}}^2}" + }, + { + "bbox": [ + 308, + 576, + 564, + 617 + ], + "type": "text", + "content": ", and " + }, + { + "bbox": [ + 308, + 576, + 564, + 617 + ], + "type": "inline_equation", + "content": "w_{mn}" + }, + { + "bbox": [ + 308, + 576, + 564, + 617 + ], + "type": "text", + "content": " is a zero-mean Gaussian distributed noise term whose variance is distance-dependent, i.e.," + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 321, + 624, + 563, + 643 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 321, + 624, + 563, + 643 + ], + "spans": [ + { + "bbox": [ + 321, + 624, + 563, + 643 + ], + "type": "interline_equation", + "content": "\\sigma_ {m n} ^ {2} = K _ {E} \\left(\\left(x _ {m} - x _ {n} ^ {\\mathrm {P i n}}\\right) ^ {2} + \\left(y _ {m} - y _ {n} ^ {\\mathrm {P i n}}\\right) ^ {2} + d _ {\\mathrm {H}} ^ {2}\\right), \\tag {2}", + "image_path": "44402e915e3a77623794fffb1854bee176c10134d19af786ec61562a51c00f68.jpg" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 308, + 650, + 563, + 673 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 650, + 563, + 673 + ], + "spans": [ + { + "bbox": [ + 308, + 650, + 563, + 673 + ], + "type": "inline_equation", + "content": "K_{E}" + }, + { + "bbox": [ + 308, + 650, + 563, + 673 + ], + "type": "text", + "content": " denotes a system parameter decided by the range estimation environment." + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 315, + 690, + 555, + 701 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 690, + 555, + 701 + ], + "spans": [ + { + "bbox": [ + 315, + 690, + 555, + 701 + ], + "type": "text", + "content": "III. 
IMPACT OF PINCHING ANTENNAS ON POSITIONING" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 308, + 706, + 515, + 719 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 706, + 515, + 719 + ], + "spans": [ + { + "bbox": [ + 308, + 706, + 515, + 719 + ], + "type": "text", + "content": "A. CRLB Achieved by Pinching-Antenna Systems" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 308, + 724, + 564, + 749 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 724, + 564, + 749 + ], + "spans": [ + { + "bbox": [ + 308, + 724, + 564, + 749 + ], + "type": "text", + "content": "Without loss of generality, the impact of pinching antennas on " + }, + { + "bbox": [ + 308, + 724, + 564, + 749 + ], + "type": "inline_equation", + "content": "\\mathrm{U}_m" + }, + { + "bbox": [ + 308, + 724, + 564, + 749 + ], + "type": "text", + "content": " 's localization is focused on. The joint probability den" + } + ] + } + ], + "index": 21 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 558, + 24, + 563, + 32 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 558, + 24, + 563, + 32 + ], + "spans": [ + { + "bbox": [ + 558, + 24, + 563, + 32 + ], + "type": "text", + "content": "1" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 14, + 202, + 36, + 536 + ], + "type": "aside_text", + "angle": 270, + "lines": [ + { + "bbox": [ + 14, + 202, + 36, + 536 + ], + "spans": [ + { + "bbox": [ + 14, + 202, + 36, + 536 + ], + "type": "text", + "content": "arXiv:2504.05792v1 [cs.IT] 8 Apr 2025" + } + ] + } + ], + "index": 22 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 0 + }, + { + "para_blocks": [ + { + "bbox": [ + 45, + 53, + 301, + 79 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 53, + 301, + 79 + ], + "spans": [ + { + "bbox": [ + 45, + 53, + 301, + 79 + ], + "type": "text", + "content": "sity function (pdf) of " + }, + { + "bbox": [ + 45, + 53, + 301, + 79 + ], + "type": 
"inline_equation", + "content": "\\hat{d}_{mn}" + }, + { + "bbox": [ + 45, + 53, + 301, + 79 + ], + "type": "text", + "content": " conditioned on " + }, + { + "bbox": [ + 45, + 53, + 301, + 79 + ], + "type": "inline_equation", + "content": "d_{mn}" + }, + { + "bbox": [ + 45, + 53, + 301, + 79 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 45, + 53, + 301, + 79 + ], + "type": "inline_equation", + "content": "1\\leq n\\leq N" + }, + { + "bbox": [ + 45, + 53, + 301, + 79 + ], + "type": "text", + "content": ", is given by" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 63, + 83, + 301, + 117 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 63, + 83, + 301, + 117 + ], + "spans": [ + { + "bbox": [ + 63, + 83, + 301, + 117 + ], + "type": "interline_equation", + "content": "f (\\hat {d} _ {m 1}, \\dots , \\hat {d} _ {m N}) = \\prod_ {n = 1} ^ {N} \\frac {1}{\\sqrt {2 \\pi \\sigma_ {m n} ^ {2}}} e ^ {- \\frac {(\\hat {d} _ {m n} - d _ {m n}) ^ {2}}{2 \\sigma_ {m n} ^ {2}}}, \\tag {3}", + "image_path": "486c8e561be8deccf96b5bf141f51114caaddd2b6a1e8ef24fb805f7283bf4b4.jpg" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 46, + 120, + 220, + 133 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 120, + 220, + 133 + ], + "spans": [ + { + "bbox": [ + 46, + 120, + 220, + 133 + ], + "type": "text", + "content": "whose log-likelihood function is given by" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 87, + 137, + 299, + 194 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 87, + 137, + 299, + 194 + ], + "spans": [ + { + "bbox": [ + 87, + 137, + 299, + 194 + ], + "type": "interline_equation", + "content": "\\begin{array}{l} L \\triangleq \\ln f (\\hat {d} _ {m 1}, \\dots , \\hat {d} _ {m N}) = - \\frac {N}{2} \\ln (2 \\pi) \\tag {4} \\\\ - \\sum_ {n = 1} ^ {N} \\ln \\sigma_ {m n} - \\sum_ {n = 1} ^ {N} \\frac {(\\hat {d} _ {m n} - d _ {m n}) ^ {2}}{2 \\sigma_ {m 
n} ^ {2}}. \\\\ \\end{array}", + "image_path": "5a1798459fbd0aa32c927f7230e28a18dc94758ab47ca58020471c74159cf48b.jpg" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 46, + 198, + 253, + 210 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 198, + 253, + 210 + ], + "spans": [ + { + "bbox": [ + 46, + 198, + 253, + 210 + ], + "type": "text", + "content": "Recall that the CRLB for " + }, + { + "bbox": [ + 46, + 198, + 253, + 210 + ], + "type": "inline_equation", + "content": "x_{m}" + }, + { + "bbox": [ + 46, + 198, + 253, + 210 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 46, + 198, + 253, + 210 + ], + "type": "inline_equation", + "content": "y_{m}" + }, + { + "bbox": [ + 46, + 198, + 253, + 210 + ], + "type": "text", + "content": " is given by" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 56, + 213, + 299, + 237 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 213, + 299, + 237 + ], + "spans": [ + { + "bbox": [ + 56, + 213, + 299, + 237 + ], + "type": "interline_equation", + "content": "\\mathcal {E} \\left\\{\\left(\\hat {x} _ {m} - x _ {m}\\right) ^ {2} + \\left(\\hat {y} _ {m} - y _ {m}\\right) ^ {2} \\right\\} \\geq \\frac {1}{J _ {x} ^ {m}} + \\frac {1}{J _ {y} ^ {m}} \\triangleq \\mathrm {C R B} _ {m}, \\tag {5}", + "image_path": "70a9e0c74d1e98b64a55490fd48b8e41bcf0c6a40007388bf03a0e841d221535.jpg" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 46, + 241, + 299, + 270 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 241, + 299, + 270 + ], + "spans": [ + { + "bbox": [ + 46, + 241, + 299, + 270 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 46, + 241, + 299, + 270 + ], + "type": "inline_equation", + "content": "\\hat{x}_m" + }, + { + "bbox": [ + 46, + 241, + 299, + 270 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 46, + 241, + 299, + 270 + ], + "type": "inline_equation", + "content": 
"\\hat{y}_m" + }, + { + "bbox": [ + 46, + 241, + 299, + 270 + ], + "type": "text", + "content": " denote the estimates of " + }, + { + "bbox": [ + 46, + 241, + 299, + 270 + ], + "type": "inline_equation", + "content": "x_m" + }, + { + "bbox": [ + 46, + 241, + 299, + 270 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 46, + 241, + 299, + 270 + ], + "type": "inline_equation", + "content": "y_m" + }, + { + "bbox": [ + 46, + 241, + 299, + 270 + ], + "type": "text", + "content": ", respectively, " + }, + { + "bbox": [ + 46, + 241, + 299, + 270 + ], + "type": "inline_equation", + "content": "J_x^m = \\mathcal{E}\\left\\{-\\frac{\\partial^2L}{\\partial x_m^2}\\right\\}" + }, + { + "bbox": [ + 46, + 241, + 299, + 270 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 46, + 241, + 299, + 270 + ], + "type": "inline_equation", + "content": "J_y^m = \\mathcal{E}\\left\\{-\\frac{\\partial^2L}{\\partial y_m^2}\\right\\}" + }, + { + "bbox": [ + 46, + 241, + 299, + 270 + ], + "type": "text", + "content": "." 
+ } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 56, + 269, + 193, + 284 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 269, + 193, + 284 + ], + "spans": [ + { + "bbox": [ + 56, + 269, + 193, + 284 + ], + "type": "inline_equation", + "content": "\\frac{\\partial L}{\\partial x_m}" + }, + { + "bbox": [ + 56, + 269, + 193, + 284 + ], + "type": "text", + "content": " can be obtained as follows:" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 56, + 289, + 299, + 355 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 289, + 299, + 355 + ], + "spans": [ + { + "bbox": [ + 56, + 289, + 299, + 355 + ], + "type": "interline_equation", + "content": "\\begin{array}{l} \\frac {\\partial L}{\\partial x _ {m}} = - \\sum_ {n = 1} ^ {N} \\frac {1}{\\sigma_ {m n}} \\frac {\\partial \\sigma_ {m n}}{\\partial x _ {m}} - \\sum_ {n = 1} ^ {N} \\frac {\\left(d _ {m n} - \\hat {d} _ {m n}\\right)}{\\sigma_ {m n} ^ {2}} \\frac {\\partial d _ {m n}}{\\partial x _ {m}} \\tag {6} \\\\ + \\sum_ {n = 1} ^ {N} \\frac {(\\hat {d} _ {m n} - d _ {m n}) ^ {2}}{\\sigma_ {m n} ^ {3}} \\frac {\\partial \\sigma_ {m n}}{\\partial x _ {m}}. 
\\\\ \\end{array}", + "image_path": "5a6335b3a9289fd503a22efb880ca732737dbd0013361107512aafb8a6d63d92.jpg" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 45, + 361, + 301, + 413 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 361, + 301, + 413 + ], + "spans": [ + { + "bbox": [ + 45, + 361, + 301, + 413 + ], + "type": "text", + "content": "The expression of " + }, + { + "bbox": [ + 45, + 361, + 301, + 413 + ], + "type": "inline_equation", + "content": "\\frac{\\partial^2L}{\\partial x_m^2}" + }, + { + "bbox": [ + 45, + 361, + 301, + 413 + ], + "type": "text", + "content": " is quite invoked; however, by using the fact that " + }, + { + "bbox": [ + 45, + 361, + 301, + 413 + ], + "type": "inline_equation", + "content": "\\mathcal{E}\\{\\hat{d}_{mn} - d_{mn}\\} = 0" + }, + { + "bbox": [ + 45, + 361, + 301, + 413 + ], + "type": "text", + "content": " and following the steps similar to those in [17], the expectation of " + }, + { + "bbox": [ + 45, + 361, + 301, + 413 + ], + "type": "inline_equation", + "content": "\\frac{\\partial^2L}{\\partial x_m^2}" + }, + { + "bbox": [ + 45, + 361, + 301, + 413 + ], + "type": "text", + "content": ", i.e., " + }, + { + "bbox": [ + 45, + 361, + 301, + 413 + ], + "type": "inline_equation", + "content": "J_x^m" + }, + { + "bbox": [ + 45, + 361, + 301, + 413 + ], + "type": "text", + "content": ", can be obtained as follows:" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 50, + 417, + 299, + 460 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 417, + 299, + 460 + ], + "spans": [ + { + "bbox": [ + 50, + 417, + 299, + 460 + ], + "type": "interline_equation", + "content": "J _ {x} ^ {m} = \\sum_ {n = 1} ^ {N} \\frac {\\left(2 K _ {E} + 1\\right)}{\\sigma_ {m n} ^ {2}} \\frac {\\left(x _ {m} - x _ {n} ^ {\\operatorname {P i n}}\\right) ^ {2}}{\\left(x _ {m} - x _ {n} ^ {\\operatorname {P i n}}\\right) ^ {2} + \\left(y _ {m} - y _ {n} ^ {\\operatorname {P i 
n}}\\right) ^ {2} + d _ {\\mathrm {H}} ^ {2}}. \\tag {7}", + "image_path": "0b193879da9018fe9108b7b97eb52ae05825c2c4ca59ee37a9665be50f9dd931.jpg" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 45, + 466, + 301, + 502 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 466, + 301, + 502 + ], + "spans": [ + { + "bbox": [ + 45, + 466, + 301, + 502 + ], + "type": "inline_equation", + "content": "J_{y}^{m}" + }, + { + "bbox": [ + 45, + 466, + 301, + 502 + ], + "type": "text", + "content": " can be obtained in a similar form, which means that the CRLB for estimating " + }, + { + "bbox": [ + 45, + 466, + 301, + 502 + ], + "type": "inline_equation", + "content": "\\mathrm{U}_m" + }, + { + "bbox": [ + 45, + 466, + 301, + 502 + ], + "type": "text", + "content": " 's location can be expressed as follows:" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 47, + 505, + 299, + 596 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 505, + 299, + 596 + ], + "spans": [ + { + "bbox": [ + 47, + 505, + 299, + 596 + ], + "type": "interline_equation", + "content": "\\begin{array}{l} \\mathrm {C R B} _ {m} = \\frac {K _ {E}}{(2 K _ {E} + 1)} \\left(\\frac {1}{\\sum_ {n = 1} ^ {N} \\frac {\\left(x _ {m} - x _ {n} ^ {\\mathrm {P i n}}\\right) ^ {2}}{\\left(\\left(x _ {m} - x _ {n} ^ {\\mathrm {P i n}}\\right) ^ {2} + \\left(y _ {m} - y _ {n} ^ {\\mathrm {P i n}}\\right) ^ {2} + d _ {\\mathrm {H}} ^ {2}\\right) ^ {2}}} \\right. \\\\ \\left. + \\frac {1}{\\sum_ {n = 1} ^ {N} \\frac {\\left(y _ {m} - y _ {n} ^ {\\operatorname* {P i n}}\\right) ^ {2}}{\\left(\\left(x _ {m} - x _ {n} ^ {\\operatorname* {P i n}}\\right) ^ {2} + \\left(y _ {m} - y _ {n} ^ {\\operatorname* {P i n}}\\right) ^ {2} + d _ {\\mathrm {H}} ^ {2}\\right) ^ {2}}}\\right). 
\\tag {8} \\\\ \\end{array}", + "image_path": "8a4c9d534d1ff08f2e240c8532c680172fe6f3b55af457cc99aeec8c9ebd07f9.jpg" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 46, + 610, + 220, + 622 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 610, + 220, + 622 + ], + "spans": [ + { + "bbox": [ + 46, + 610, + 220, + 622 + ], + "type": "text", + "content": "B. Performance Analysis Based on CRLB" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 45, + 624, + 301, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 624, + 301, + 713 + ], + "spans": [ + { + "bbox": [ + 45, + 624, + 301, + 713 + ], + "type": "text", + "content": "1) Performance Gain over Conventional Antennas: For the conventional-antenna benchmark, consider the use of a circular antenna array with its center located at " + }, + { + "bbox": [ + 45, + 624, + 301, + 713 + ], + "type": "inline_equation", + "content": "(0,0,0)" + }, + { + "bbox": [ + 45, + 624, + 301, + 713 + ], + "type": "text", + "content": " and its radius being " + }, + { + "bbox": [ + 45, + 624, + 301, + 713 + ], + "type": "inline_equation", + "content": "\\frac{\\lambda}{4\\sin\\left(\\frac{\\pi}{N}\\right)}" + }, + { + "bbox": [ + 45, + 624, + 301, + 713 + ], + "type": "text", + "content": ", which ensures that the minimal pairwise distance of the antennas is " + }, + { + "bbox": [ + 45, + 624, + 301, + 713 + ], + "type": "inline_equation", + "content": "\\frac{\\lambda}{2}" + }, + { + "bbox": [ + 45, + 624, + 301, + 713 + ], + "type": "text", + "content": ", where " + }, + { + "bbox": [ + 45, + 624, + 301, + 713 + ], + "type": "inline_equation", + "content": "\\lambda" + }, + { + "bbox": [ + 45, + 624, + 301, + 713 + ], + "type": "text", + "content": " denotes the wavelength. 
By using the fact that the users are uniformly distributed within the service area, the performance gain of pinching antennas over conventional antennas can be evaluated as follows:" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 56, + 715, + 299, + 747 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 715, + 299, + 747 + ], + "spans": [ + { + "bbox": [ + 56, + 715, + 299, + 747 + ], + "type": "interline_equation", + "content": "\\Delta_ {\\mathrm {C R B}} = \\int_ {- \\frac {D _ {\\mathrm {L}}}{2}} ^ {\\frac {D _ {\\mathrm {L}}}{2}} \\int_ {- \\frac {D _ {\\mathrm {W}}}{2}} ^ {\\frac {D _ {\\mathrm {W}}}{2}} \\left(\\mathrm {C R B} _ {m} - \\mathrm {C R B} _ {m} ^ {\\text {C o n v}}\\right) \\frac {d y _ {m}}{D _ {\\mathrm {W}}} \\frac {d x _ {m}}{D _ {\\mathrm {L}}}, \\tag {9}", + "image_path": "1dccd3cab821d5c1eaf6b5880ef6b79971c23a5b46324e9c72aad3ff00c1e95a.jpg" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 307, + 53, + 564, + 175 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 53, + 564, + 175 + ], + "spans": [ + { + "bbox": [ + 307, + 53, + 564, + 175 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 307, + 53, + 564, + 175 + ], + "type": "inline_equation", + "content": "\\mathrm{CRB}_m^{\\mathrm{Conv}}" + }, + { + "bbox": [ + 307, + 53, + 564, + 175 + ], + "type": "text", + "content": " can be obtained similarly to " + }, + { + "bbox": [ + 307, + 53, + 564, + 175 + ], + "type": "inline_equation", + "content": "\\mathrm{CRB}_m" + }, + { + "bbox": [ + 307, + 53, + 564, + 175 + ], + "type": "text", + "content": " by replacing the locations of the pinching antennas with those of the conventional antennas. 
The performance gain in (9) can be straightforwardly evaluated via computer simulations, but a closed-form expression of " + }, + { + "bbox": [ + 307, + 53, + 564, + 175 + ], + "type": "inline_equation", + "content": "\\Delta_{\\mathrm{CRB}}" + }, + { + "bbox": [ + 307, + 53, + 564, + 175 + ], + "type": "text", + "content": " is difficult to obtain due to the factional expression of the CRLB. We note that the performance gain of pinching antennas over conventional antennas can also be illustrated by simply focusing on the user which is located at " + }, + { + "bbox": [ + 307, + 53, + 564, + 175 + ], + "type": "inline_equation", + "content": "\\left(\\frac{D_{\\mathrm{L}}}{2},0,0\\right)" + }, + { + "bbox": [ + 307, + 53, + 564, + 175 + ], + "type": "text", + "content": ". The use of conventional antennas can achieve the following CRLB:" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 322, + 180, + 563, + 371 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 322, + 180, + 563, + 371 + ], + "spans": [ + { + "bbox": [ + 322, + 180, + 563, + 371 + ], + "type": "interline_equation", + "content": "\\begin{array}{l} \\mathrm {C R B} _ {m} ^ {\\mathrm {C o n v}} = \\left(\\frac {1}{\\sum_ {n = 1} ^ {N} \\frac {\\left(\\frac {D _ {\\mathrm {L}}}{2} - x _ {n} ^ {\\mathrm {C o n v}}\\right) ^ {2}}{\\left(\\left(\\frac {D _ {\\mathrm {L}}}{2} - x _ {n} ^ {\\mathrm {C o n v}}\\right) ^ {2} + (y _ {n} ^ {\\mathrm {C o n v}}) ^ {2} + d _ {\\mathrm {H}} ^ {2}\\right) ^ {2}}} \\right. \\\\ \\left. 
+ \\frac {1}{\\sum_ {n = 1} ^ {N} \\frac {\\left(y _ {n} ^ {\\text {C o n v}}\\right) ^ {2}}{\\left(\\left(\\frac {D _ {\\mathrm {L}}}{2} - x _ {n} ^ {\\text {C o n v}}\\right) ^ {2} + \\left(y _ {n} ^ {\\text {C o n v}}\\right) ^ {2} + d _ {\\mathrm {H}} ^ {2}\\right) ^ {2}}}\\right) \\frac {K _ {E}}{\\left(2 K _ {E} + 1\\right)} \\\\ \\stackrel {(a)} {\\approx} \\frac {K _ {E}}{(2 K _ {E} + 1)} \\left(\\frac {4 \\left(\\frac {D _ {\\mathrm {L}} ^ {2}}{4} + d _ {\\mathrm {H}} ^ {2}\\right) ^ {2}}{N D _ {\\mathrm {L}} ^ {2}} + \\frac {\\left(\\frac {D _ {\\mathrm {L}} ^ {2}}{4} + d _ {\\mathrm {H}} ^ {2}\\right) ^ {2}}{\\sum_ {n = 1} ^ {N} (y _ {n} ^ {\\mathrm {C o n v}}) ^ {2}}\\right) \\\\ \\xrightarrow {(b)} \\infty , \\tag {10} \\\\ \\end{array}", + "image_path": "bb13085ae2c5a7cb4bdb6ceee275365f3936f065611f1ca97fcd535b83dce0d7.jpg" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 307, + 379, + 563, + 438 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 379, + 563, + 438 + ], + "spans": [ + { + "bbox": [ + 307, + 379, + 563, + 438 + ], + "type": "text", + "content": "where step (a) is due to the fact that the conventional antennas are clustered close to the center of the service area, and step (b) is due to the fact that " + }, + { + "bbox": [ + 307, + 379, + 563, + 438 + ], + "type": "inline_equation", + "content": "|y_{n}^{\\mathrm{Conv}}| \\to 0" + }, + { + "bbox": [ + 307, + 379, + 563, + 438 + ], + "type": "text", + "content": " for conventional antennas, particularly for the case with high carrier frequencies (i.e., small wavelengths)." + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 308, + 439, + 564, + 487 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 439, + 564, + 487 + ], + "spans": [ + { + "bbox": [ + 308, + 439, + 564, + 487 + ], + "type": "text", + "content": "On the other hand, pinching antennas do not suffer the singularity issue experienced by conventional antennas. 
For example, for the user located at " + }, + { + "bbox": [ + 308, + 439, + 564, + 487 + ], + "type": "inline_equation", + "content": "\\left(\\frac{D_{\\mathrm{L}}}{2},0,0\\right)" + }, + { + "bbox": [ + 308, + 439, + 564, + 487 + ], + "type": "text", + "content": ", the corresponding CRLB can be expressed as follows:" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 313, + 494, + 563, + 620 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 494, + 563, + 620 + ], + "spans": [ + { + "bbox": [ + 313, + 494, + 563, + 620 + ], + "type": "interline_equation", + "content": "\\begin{array}{l} \\mathrm {C R B} _ {m} = \\frac {K _ {E}}{\\left(2 K _ {E} + 1\\right)} \\left(\\frac {1}{\\sum_ {n = 1} ^ {N} \\frac {\\left(\\frac {D _ {\\mathrm {L}}}{2} - x _ {n} ^ {\\mathrm {P i n}}\\right) ^ {2}}{\\left(\\left(\\frac {D _ {\\mathrm {L}}}{2} - x _ {n} ^ {\\mathrm {P i n}}\\right) ^ {2} + (y _ {n} ^ {\\mathrm {P i n}}) ^ {2} + d _ {\\mathrm {H}} ^ {2}\\right) ^ {2}}} \\right. \\\\ \\left. + \\frac {1}{\\sum_ {n = 1} ^ {N} \\frac {\\left(y _ {n} ^ {\\mathrm {P i n}}\\right) ^ {2}}{\\left(\\left(\\frac {D _ {\\mathrm {L}}}{2} - x _ {n} ^ {\\mathrm {P i n}}\\right) ^ {2} + \\left(y _ {n} ^ {\\mathrm {P i n}}\\right) ^ {2} + d _ {\\mathrm {H}} ^ {2}\\right) ^ {2}}}\\right). 
\\tag {11} \\\\ \\end{array}", + "image_path": "b5909af4d94bbfcfd073d85cb6beeafb95d51b8b085babdd36c019a606df5110.jpg" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 308, + 625, + 563, + 651 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 625, + 563, + 651 + ], + "spans": [ + { + "bbox": [ + 308, + 625, + 563, + 651 + ], + "type": "text", + "content": "For illustrative purposes, a simple upper bound on the CRLB achieved by pinching antennas can be obtained as follows:" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 313, + 656, + 563, + 746 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 656, + 563, + 746 + ], + "spans": [ + { + "bbox": [ + 313, + 656, + 563, + 746 + ], + "type": "interline_equation", + "content": "\\begin{array}{l} \\mathrm {C R B} _ {m} \\leq \\frac {K _ {E}}{\\left(2 K _ {E} + 1\\right)} \\left(\\frac {\\left(\\left(\\frac {D _ {\\mathrm {L}}}{2} - x _ {n} ^ {\\operatorname {P i n}}\\right) ^ {2} + \\left(y _ {n} ^ {\\operatorname {P i n}}\\right) ^ {2} + d _ {\\mathrm {H}} ^ {2}\\right) ^ {2}}{\\left(\\frac {D _ {\\mathrm {L}}}{2} - x _ {n} ^ {\\operatorname {P i n}}\\right) ^ {2}} \\right. \\\\ \\left. 
+ \\frac {\\left(\\left(\\frac {D _ {\\mathrm {L}}}{2} - x _ {n} ^ {\\text {P i n}}\\right) ^ {2} + \\left(y _ {n} ^ {\\text {P i n}}\\right) ^ {2} + d _ {\\mathrm {H}} ^ {2}\\right) ^ {2}}{\\left(y _ {n} ^ {\\text {P i n}}\\right) ^ {2}}\\right), \\tag {12} \\\\ \\end{array}", + "image_path": "151d1b9a99db6dd96408c9f3ba705c7a8bdad63c519ff3859798eac266f15097.jpg" + } + ] + } + ], + "index": 23 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 558, + 24, + 563, + 32 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 558, + 24, + 563, + 32 + ], + "spans": [ + { + "bbox": [ + 558, + 24, + 563, + 32 + ], + "type": "text", + "content": "2" + } + ] + } + ], + "index": 0 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 1 + }, + { + "para_blocks": [ + { + "bbox": [ + 45, + 54, + 301, + 115 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 54, + 301, + 115 + ], + "spans": [ + { + "bbox": [ + 45, + 54, + 301, + 115 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 45, + 54, + 301, + 115 + ], + "type": "inline_equation", + "content": "n" + }, + { + "bbox": [ + 45, + 54, + 301, + 115 + ], + "type": "text", + "content": " is an arbitrary integer between 1 and " + }, + { + "bbox": [ + 45, + 54, + 301, + 115 + ], + "type": "inline_equation", + "content": "N" + }, + { + "bbox": [ + 45, + 54, + 301, + 115 + ], + "type": "text", + "content": ". 
Because of the diverse locations of the " + }, + { + "bbox": [ + 45, + 54, + 301, + 115 + ], + "type": "inline_equation", + "content": "N" + }, + { + "bbox": [ + 45, + 54, + 301, + 115 + ], + "type": "text", + "content": " pinching antennas, it is always possible to find " + }, + { + "bbox": [ + 45, + 54, + 301, + 115 + ], + "type": "inline_equation", + "content": "n \\in \\{1, \\dots, N\\}" + }, + { + "bbox": [ + 45, + 54, + 301, + 115 + ], + "type": "text", + "content": " which yields a finite value for the upper bound shown in (12), i.e., the CRLB achieved by pinching antennas is always bounded." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 45, + 115, + 301, + 163 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 115, + 301, + 163 + ], + "spans": [ + { + "bbox": [ + 45, + 115, + 301, + 163 + ], + "type": "text", + "content": "Remark 1: Unlike conventional antennas which can cause noticeable accuracy variations between users, the carried-out case study shows that pinching antennas have the ability to offer uniform positioning accuracy between the users." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 45, + 163, + 301, + 295 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 163, + 301, + 295 + ], + "spans": [ + { + "bbox": [ + 45, + 163, + 301, + 295 + ], + "type": "text", + "content": "2) Flexible User-Centric Positioning: Due to their low-cost and reconfigurability features, the locations of pinching antennas can be tailored to a serving user for realizing flexible user-centric positioning. To facilitate the performance analysis, the association between the pinching antennas and the waveguides is required. 
Without loss of generality, assume that there are " + }, + { + "bbox": [ + 45, + 163, + 301, + 295 + ], + "type": "inline_equation", + "content": "\\tilde{N} = \\frac{N}{N_{\\mathrm{WG}}}" + }, + { + "bbox": [ + 45, + 163, + 301, + 295 + ], + "type": "text", + "content": " pinching antennas on each waveguide. Denote the location of the " + }, + { + "bbox": [ + 45, + 163, + 301, + 295 + ], + "type": "inline_equation", + "content": "n" + }, + { + "bbox": [ + 45, + 163, + 301, + 295 + ], + "type": "text", + "content": " -th antenna on the " + }, + { + "bbox": [ + 45, + 163, + 301, + 295 + ], + "type": "inline_equation", + "content": "i" + }, + { + "bbox": [ + 45, + 163, + 301, + 295 + ], + "type": "text", + "content": " -th waveguide by " + }, + { + "bbox": [ + 45, + 163, + 301, + 295 + ], + "type": "inline_equation", + "content": "\\psi_{in}^{\\mathrm{Pin}} = (x_{in}^{\\mathrm{Pin}},y_{in}^{\\mathrm{Pin}},d_{\\mathrm{H}})" + }, + { + "bbox": [ + 45, + 163, + 301, + 295 + ], + "type": "text", + "content": ". Furthermore, assume that the antennas are equally spaced, and define " + }, + { + "bbox": [ + 45, + 163, + 301, + 295 + ], + "type": "inline_equation", + "content": "\\Delta_x = |x_{in}^{\\mathrm{Pin}} - x_{im}^{\\mathrm{Pin}}|" + }, + { + "bbox": [ + 45, + 163, + 301, + 295 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 45, + 163, + 301, + 295 + ], + "type": "inline_equation", + "content": "\\Delta_y = |x_{in}^{\\mathrm{Pin}} - x_{jn}^{\\mathrm{Pin}}|" + }, + { + "bbox": [ + 45, + 163, + 301, + 295 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 45, + 163, + 301, + 295 + ], + "type": "inline_equation", + "content": "m\\neq n" + }, + { + "bbox": [ + 45, + 163, + 301, + 295 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 45, + 163, + 301, + 295 + ], + "type": "inline_equation", + "content": "i\\neq j" + }, + { + "bbox": [ + 45, + 163, + 301, + 295 + ], + "type": "text", + "content": "." 
+ } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 45, + 294, + 301, + 377 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 294, + 301, + 377 + ], + "spans": [ + { + "bbox": [ + 45, + 294, + 301, + 377 + ], + "type": "text", + "content": "For illustrative purposes, assume that all " + }, + { + "bbox": [ + 45, + 294, + 301, + 377 + ], + "type": "inline_equation", + "content": "N" + }, + { + "bbox": [ + 45, + 294, + 301, + 377 + ], + "type": "text", + "content": " pinching antennas are activated in a square area with " + }, + { + "bbox": [ + 45, + 294, + 301, + 377 + ], + "type": "inline_equation", + "content": "\\mathrm{U}_m" + }, + { + "bbox": [ + 45, + 294, + 301, + 377 + ], + "type": "text", + "content": " at its center, where " + }, + { + "bbox": [ + 45, + 294, + 301, + 377 + ], + "type": "inline_equation", + "content": "\\tilde{N} = N_{\\mathrm{WG}}" + }, + { + "bbox": [ + 45, + 294, + 301, + 377 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 45, + 294, + 301, + 377 + ], + "type": "inline_equation", + "content": "\\Delta_x = \\Delta_y = \\Delta" + }, + { + "bbox": [ + 45, + 294, + 301, + 377 + ], + "type": "text", + "content": ". This assumption is made to facilitate the performance analysis, and more practical setups will be considered in the simulation section. Define " + }, + { + "bbox": [ + 45, + 294, + 301, + 377 + ], + "type": "inline_equation", + "content": "\\bar{N} = \\frac{\\tilde{N}}{2}" + }, + { + "bbox": [ + 45, + 294, + 301, + 377 + ], + "type": "text", + "content": ", and without loss of generality, assume that " + }, + { + "bbox": [ + 45, + 294, + 301, + 377 + ], + "type": "inline_equation", + "content": "\\bar{N}" + }, + { + "bbox": [ + 45, + 294, + 301, + 377 + ], + "type": "text", + "content": " is an even number." 
+ } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 45, + 379, + 301, + 403 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 379, + 301, + 403 + ], + "spans": [ + { + "bbox": [ + 45, + 379, + 301, + 403 + ], + "type": "text", + "content": "With these assumptions, the CRLB in (8) can be simplified as follows:" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 58, + 408, + 289, + 449 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 58, + 408, + 289, + 449 + ], + "spans": [ + { + "bbox": [ + 58, + 408, + 289, + 449 + ], + "type": "interline_equation", + "content": "\\mathrm {C R B} _ {m} = \\frac {\\frac {K _ {E} \\Delta^ {2}}{4 (2 K _ {E} + 1)}}{\\sum_ {i = 1} ^ {\\bar {N}} \\sum_ {n = 1} ^ {\\bar {N}} \\frac {(n - \\frac {1}{2}) ^ {2}}{\\beta_ {n i} ^ {2}}} + \\frac {\\frac {K _ {E} \\Delta^ {2}}{4 (2 K _ {E} + 1)}}{\\sum_ {i = 1} ^ {\\bar {N}} \\sum_ {n = 1} ^ {\\bar {N}} \\frac {(i - \\frac {1}{2}) ^ {2}}{\\beta_ {n i} ^ {2}}},", + "image_path": "809d2cd5feaaa0dd5d06c213f31f3beb69e233a29002300ab0744dc226b58530.jpg" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 45, + 457, + 301, + 605 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 457, + 301, + 605 + ], + "spans": [ + { + "bbox": [ + 45, + 457, + 301, + 605 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 45, + 457, + 301, + 605 + ], + "type": "inline_equation", + "content": "= \\left(n - \\frac{1}{2}\\right)^{2} + \\left(i - \\frac{1}{2}\\right)^{2} + \\frac{d_{\\mathrm{H}}^{2}}{\\Delta^{2}}" + }, + { + "bbox": [ + 45, + 457, + 301, + 605 + ], + "type": "text", + "content": ". The above CRLB can be used to design the antenna placement, i.e., the optimal choice of " + }, + { + "bbox": [ + 45, + 457, + 301, + 605 + ], + "type": "inline_equation", + "content": "\\Delta" + }, + { + "bbox": [ + 45, + 457, + 301, + 605 + ], + "type": "text", + "content": " for minimizing the CRLB. 
Computer simulations can be used to verify that " + }, + { + "bbox": [ + 45, + 457, + 301, + 605 + ], + "type": "inline_equation", + "content": "\\frac{\\partial^2\\mathrm{CRB}_m}{\\partial\\Delta^2} > 0" + }, + { + "bbox": [ + 45, + 457, + 301, + 605 + ], + "type": "text", + "content": ", i.e., " + }, + { + "bbox": [ + 45, + 457, + 301, + 605 + ], + "type": "inline_equation", + "content": "\\mathrm{CRB}_m" + }, + { + "bbox": [ + 45, + 457, + 301, + 605 + ], + "type": "text", + "content": " is a convex function of " + }, + { + "bbox": [ + 45, + 457, + 301, + 605 + ], + "type": "inline_equation", + "content": "\\Delta" + }, + { + "bbox": [ + 45, + 457, + 301, + 605 + ], + "type": "text", + "content": ", and hence convex optimization solvers can be used to find the optimal solution of " + }, + { + "bbox": [ + 45, + 457, + 301, + 605 + ], + "type": "inline_equation", + "content": "\\Delta" + }, + { + "bbox": [ + 45, + 457, + 301, + 605 + ], + "type": "text", + "content": " efficiently. To obtain an insightful understanding of the optimal choice of " + }, + { + "bbox": [ + 45, + 457, + 301, + 605 + ], + "type": "inline_equation", + "content": "\\Delta" + }, + { + "bbox": [ + 45, + 457, + 301, + 605 + ], + "type": "text", + "content": ", a special case with " + }, + { + "bbox": [ + 45, + 457, + 301, + 605 + ], + "type": "inline_equation", + "content": "N = 4" + }, + { + "bbox": [ + 45, + 457, + 301, + 605 + ], + "type": "text", + "content": " is focused on in the following. We note that this special case is important in practice, given the fact that using a small number of antennas is helpful in reducing system overhead. 
For the case with " + }, + { + "bbox": [ + 45, + 457, + 301, + 605 + ], + "type": "inline_equation", + "content": "N = 4" + }, + { + "bbox": [ + 45, + 457, + 301, + 605 + ], + "type": "text", + "content": ", the CRLB can be simplified as follows:" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 96, + 612, + 301, + 641 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 96, + 612, + 301, + 641 + ], + "spans": [ + { + "bbox": [ + 96, + 612, + 301, + 641 + ], + "type": "interline_equation", + "content": "\\mathrm {C R B} _ {m} = \\frac {2 K _ {E} \\Delta^ {2}}{\\left(2 K _ {E} + 1\\right)} \\left(\\frac {1}{2} + \\frac {d _ {\\mathrm {H}} ^ {2}}{\\Delta^ {2}}\\right) ^ {2}, \\tag {13}", + "image_path": "7957cdc5bc4aca57f3b61f03118c9f6b7913f87249d6ba6442b9ac408f465fd0.jpg" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 45, + 647, + 211, + 659 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 647, + 211, + 659 + ], + "spans": [ + { + "bbox": [ + 45, + 647, + 211, + 659 + ], + "type": "text", + "content": "whose first-order derivative is given by" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 58, + 666, + 301, + 694 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 58, + 666, + 301, + 694 + ], + "spans": [ + { + "bbox": [ + 58, + 666, + 301, + 694 + ], + "type": "interline_equation", + "content": "\\frac {\\partial \\mathrm {C R B} _ {m}}{\\partial \\Delta} = \\frac {4 K _ {E}}{(2 K _ {E} + 1)} \\left(\\frac {1}{2} \\Delta + \\frac {d _ {\\mathrm {H}} ^ {2}}{\\Delta}\\right) \\left(\\frac {1}{2} - \\frac {d _ {\\mathrm {H}} ^ {2}}{\\Delta^ {2}}\\right). 
\\tag {14}", + "image_path": "e38af74bd0ebaec881bdde2aca1a48ff6d2810acd55316bfa5058f251dae72d6.jpg" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 45, + 700, + 257, + 712 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 700, + 257, + 712 + ], + "spans": [ + { + "bbox": [ + 45, + 700, + 257, + 712 + ], + "type": "text", + "content": "The second-order derivative of " + }, + { + "bbox": [ + 45, + 700, + 257, + 712 + ], + "type": "inline_equation", + "content": "\\mathrm{CRB}_m" + }, + { + "bbox": [ + 45, + 700, + 257, + 712 + ], + "type": "text", + "content": " is given by" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 83, + 719, + 301, + 746 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 83, + 719, + 301, + 746 + ], + "spans": [ + { + "bbox": [ + 83, + 719, + 301, + 746 + ], + "type": "interline_equation", + "content": "\\frac {\\partial^ {2} \\mathrm {C R B} _ {m}}{\\partial \\Delta^ {2}} = \\frac {4 K _ {E}}{\\left(2 K _ {E} + 1\\right)} \\left(\\frac {1}{4} + 3 \\frac {d _ {\\mathrm {H}} ^ {4}}{\\Delta^ {4}}\\right) > 0, \\tag {15}", + "image_path": "547d79cbc78526a73c650c0b1ea306da82b683b588de1e662784af6b3e9448c8.jpg" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 308, + 54, + 564, + 91 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 54, + 564, + 91 + ], + "spans": [ + { + "bbox": [ + 308, + 54, + 564, + 91 + ], + "type": "text", + "content": "which means that " + }, + { + "bbox": [ + 308, + 54, + 564, + 91 + ], + "type": "inline_equation", + "content": "\\mathrm{CRB}_m" + }, + { + "bbox": [ + 308, + 54, + 564, + 91 + ], + "type": "text", + "content": " is a convex function of " + }, + { + "bbox": [ + 308, + 54, + 564, + 91 + ], + "type": "inline_equation", + "content": "\\Delta" + }, + { + "bbox": [ + 308, + 54, + 564, + 91 + ], + "type": "text", + "content": ". 
Therefore, the optimal solution of " + }, + { + "bbox": [ + 308, + 54, + 564, + 91 + ], + "type": "inline_equation", + "content": "\\Delta" + }, + { + "bbox": [ + 308, + 54, + 564, + 91 + ], + "type": "text", + "content": " for minimizing the CRLB for the special case with " + }, + { + "bbox": [ + 308, + 54, + 564, + 91 + ], + "type": "inline_equation", + "content": "N = 4" + }, + { + "bbox": [ + 308, + 54, + 564, + 91 + ], + "type": "text", + "content": " is given by" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 407, + 99, + 564, + 113 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 407, + 99, + 564, + 113 + ], + "spans": [ + { + "bbox": [ + 407, + 99, + 564, + 113 + ], + "type": "interline_equation", + "content": "\\Delta^ {*} = \\sqrt {2} d _ {H}. \\tag {16}", + "image_path": "c075e3bc68e2e5ee15dc6116b2ff2a6810ae8aadf71013e8d8de860a1ed1c4a4.jpg" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 308, + 121, + 564, + 181 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 121, + 564, + 181 + ], + "spans": [ + { + "bbox": [ + 308, + 121, + 564, + 181 + ], + "type": "text", + "content": "Remark 2: An intuition is that the CRLB is minimized if all the antennas are placed as close to the user as possible, i.e., " + }, + { + "bbox": [ + 308, + 121, + 564, + 181 + ], + "type": "inline_equation", + "content": "\\Delta^{*} \\to 0" + }, + { + "bbox": [ + 308, + 121, + 564, + 181 + ], + "type": "text", + "content": " (or " + }, + { + "bbox": [ + 308, + 121, + 564, + 181 + ], + "type": "inline_equation", + "content": "\\frac{\\lambda}{2}" + }, + { + "bbox": [ + 308, + 121, + 564, + 181 + ], + "type": "text", + "content": " to avoid antenna coupling). (16) shows that this intuition is wrong, where the optimal antenna spacing is a function of the height of the waveguides." 
+ } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 308, + 181, + 564, + 245 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 181, + 564, + 245 + ], + "spans": [ + { + "bbox": [ + 308, + 181, + 564, + 245 + ], + "type": "text", + "content": "3) Local-Maximum Property of CRLB: In the proximity of each pinching antenna, " + }, + { + "bbox": [ + 308, + 181, + 564, + 245 + ], + "type": "inline_equation", + "content": "\\psi_{n}^{\\mathrm{Pin}}" + }, + { + "bbox": [ + 308, + 181, + 564, + 245 + ], + "type": "text", + "content": ", there exists a local maximum of " + }, + { + "bbox": [ + 308, + 181, + 564, + 245 + ], + "type": "inline_equation", + "content": "\\mathrm{CRB}_m" + }, + { + "bbox": [ + 308, + 181, + 564, + 245 + ], + "type": "text", + "content": " shown in (8). This local-maximum property can be revealed by studying " + }, + { + "bbox": [ + 308, + 181, + 564, + 245 + ], + "type": "inline_equation", + "content": "\\frac{\\partial\\mathrm{CRB}_m}{\\partial x_m}" + }, + { + "bbox": [ + 308, + 181, + 564, + 245 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 308, + 181, + 564, + 245 + ], + "type": "inline_equation", + "content": "\\frac{\\partial\\mathrm{CRB}_m}{\\partial y_m}" + }, + { + "bbox": [ + 308, + 181, + 564, + 245 + ], + "type": "text", + "content": ". 
Without loss of generality, " + }, + { + "bbox": [ + 308, + 181, + 564, + 245 + ], + "type": "inline_equation", + "content": "\\frac{\\partial\\mathrm{CRB}_m}{\\partial x_m}" + }, + { + "bbox": [ + 308, + 181, + 564, + 245 + ], + "type": "text", + "content": " is focused, and can be expressed as follows:" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 321, + 251, + 564, + 279 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 321, + 251, + 564, + 279 + ], + "spans": [ + { + "bbox": [ + 321, + 251, + 564, + 279 + ], + "type": "interline_equation", + "content": "\\frac {\\partial \\mathrm {C R B} _ {m}}{\\partial x _ {m}} = \\frac {K _ {E}}{(2 K _ {E} + 1)} \\left(- \\frac {1}{\\gamma_ {1} ^ {2}} [ \\gamma_ {2} - \\gamma_ {3} ] + \\frac {1}{\\gamma_ {4} ^ {2}} \\gamma_ {5}\\right), \\tag {17}", + "image_path": "9c2b4a794f70a8d57f2ebe42f9fb43fa9645319cfa28d3e7c92d4d5f0038df56.jpg" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 308, + 286, + 564, + 358 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 286, + 564, + 358 + ], + "spans": [ + { + "bbox": [ + 308, + 286, + 564, + 358 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 308, + 286, + 564, + 358 + ], + "type": "inline_equation", + "content": "d_{mn}^2 = \\left(x_m - x_n^{\\mathrm{Pin}}\\right)^2 +\\left(y_m - y_n^{\\mathrm{Pin}}\\right)^2 +d_{\\mathrm{H}}^2," + }, + { + "bbox": [ + 308, + 286, + 564, + 358 + ], + "type": "inline_equation", + "content": "\\gamma_{1} = \\sum_{n = 1}^{N}\\frac{\\left(x_{m} - x_{n}^{\\mathrm{Pin}}\\right)^{2}}{d_{mn}^{4}},\\gamma_{2} = \\sum_{n = 1}^{N}\\frac{2\\left(x_{m} - x_{n}^{\\mathrm{Pin}}\\right)}{d_{mn}^{4}},\\gamma_{3} =" + }, + { + "bbox": [ + 308, + 286, + 564, + 358 + ], + "type": "inline_equation", + "content": "\\begin{array}{r}\\sum_{n = 1}^{N}\\frac{4\\left(x_m - x_n^{\\mathrm{Pin}}\\right)^3}{d_{mn}^6},\\gamma_4 = \\sum_{n = 1}^{N}\\frac{\\left(y_m - 
y_n^{\\mathrm{Pin}}\\right)^2}{d_{mn}^2},\\mathrm{and}\\gamma_5 = \\end{array}" + }, + { + "bbox": [ + 308, + 286, + 564, + 358 + ], + "type": "inline_equation", + "content": "\\begin{array}{r}\\sum_{n = 1}^{N}\\frac{4\\left(x_m - x_n^{\\mathrm{Pin}}\\right)\\left(y_m - y_n^{\\mathrm{Pin}}\\right)^2}{d_{mn}^6}. \\end{array}" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 308, + 358, + 564, + 416 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 358, + 564, + 416 + ], + "spans": [ + { + "bbox": [ + 308, + 358, + 564, + 416 + ], + "type": "text", + "content": "Without loss of generality, assume that " + }, + { + "bbox": [ + 308, + 358, + 564, + 416 + ], + "type": "inline_equation", + "content": "\\mathrm{U}_m" + }, + { + "bbox": [ + 308, + 358, + 564, + 416 + ], + "type": "text", + "content": " is in the proximity of the first pinching antenna on the first waveguide, i.e., " + }, + { + "bbox": [ + 308, + 358, + 564, + 416 + ], + "type": "inline_equation", + "content": "x_{m} = x_{11}^{\\mathrm{Pin}} + \\delta_{x}" + }, + { + "bbox": [ + 308, + 358, + 564, + 416 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 308, + 358, + 564, + 416 + ], + "type": "inline_equation", + "content": "y_{m} = y_{11}^{\\mathrm{Pin}} + \\delta_{y}" + }, + { + "bbox": [ + 308, + 358, + 564, + 416 + ], + "type": "text", + "content": ", where " + }, + { + "bbox": [ + 308, + 358, + 564, + 416 + ], + "type": "inline_equation", + "content": "\\delta_x\\to 0" + }, + { + "bbox": [ + 308, + 358, + 564, + 416 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 308, + 358, + 564, + 416 + ], + "type": "inline_equation", + "content": "\\delta_y\\rightarrow 0" + }, + { + "bbox": [ + 308, + 358, + 564, + 416 + ], + "type": "text", + "content": ". 
In this case, " + }, + { + "bbox": [ + 308, + 358, + 564, + 416 + ], + "type": "inline_equation", + "content": "\\gamma_{1}" + }, + { + "bbox": [ + 308, + 358, + 564, + 416 + ], + "type": "text", + "content": " in (17) can be approximated as follows:" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 343, + 423, + 564, + 460 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 343, + 423, + 564, + 460 + ], + "spans": [ + { + "bbox": [ + 343, + 423, + 564, + 460 + ], + "type": "interline_equation", + "content": "\\gamma_ {1} \\approx \\sum_ {i = 1} ^ {\\frac {N}{N}} \\sum_ {n = 1} ^ {\\tilde {N} - 1} \\frac {n ^ {2} \\Delta_ {x} ^ {2}}{\\left(n ^ {2} \\Delta_ {x} ^ {2} + (i - 1) ^ {2} \\Delta_ {y} ^ {2} + d _ {\\mathrm {H}} ^ {2}\\right) ^ {2}}, \\tag {18}", + "image_path": "b5c18237860340138586e9dc7f54cb91dbb12c3aa6edb0d714545f8f944c6b5c.jpg" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 308, + 468, + 563, + 494 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 468, + 563, + 494 + ], + "spans": [ + { + "bbox": [ + 308, + 468, + 563, + 494 + ], + "type": "text", + "content": "where the terms at the order of " + }, + { + "bbox": [ + 308, + 468, + 563, + 494 + ], + "type": "inline_equation", + "content": "\\delta_x^2" + }, + { + "bbox": [ + 308, + 468, + 563, + 494 + ], + "type": "text", + "content": " are omitted. 
Similarly, by omitting the terms of " + }, + { + "bbox": [ + 308, + 468, + 563, + 494 + ], + "type": "inline_equation", + "content": "\\delta_x^2" + }, + { + "bbox": [ + 308, + 468, + 563, + 494 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 308, + 468, + 563, + 494 + ], + "type": "inline_equation", + "content": "\\gamma_2" + }, + { + "bbox": [ + 308, + 468, + 563, + 494 + ], + "type": "text", + "content": " can be approximated as follows:" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 338, + 501, + 563, + 577 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 338, + 501, + 563, + 577 + ], + "spans": [ + { + "bbox": [ + 338, + 501, + 563, + 577 + ], + "type": "interline_equation", + "content": "\\begin{array}{l} \\gamma_ {2} \\approx \\sum_ {i = 1} ^ {\\frac {N}{N}} \\frac {2 \\delta_ {x}}{\\left(\\delta^ {2} + (i - 1) ^ {2} \\Delta_ {y} ^ {2} + d _ {\\mathrm {H}} ^ {2}\\right) ^ {2}} \\tag {19} \\\\ - \\sum_ {i = 1} ^ {\\frac {N}{N}} \\sum_ {n = 1} ^ {\\tilde {N} - 1} \\frac {2 n \\Delta_ {x}}{\\left(n ^ {2} \\Delta_ {x} ^ {2} + (i - 1) ^ {2} \\Delta_ {y} ^ {2} + d _ {\\mathrm {H}} ^ {2}\\right) ^ {2}}. 
\\\\ \\end{array}", + "image_path": "01ea50d42286274380a45111164366e7d2a8affde9e260718f721bf2d384510e.jpg" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 309, + 583, + 547, + 596 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 309, + 583, + 547, + 596 + ], + "spans": [ + { + "bbox": [ + 309, + 583, + 547, + 596 + ], + "type": "text", + "content": "Similarly, " + }, + { + "bbox": [ + 309, + 583, + 547, + 596 + ], + "type": "inline_equation", + "content": "\\gamma_3, \\gamma_4" + }, + { + "bbox": [ + 309, + 583, + 547, + 596 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 309, + 583, + 547, + 596 + ], + "type": "inline_equation", + "content": "\\gamma_5" + }, + { + "bbox": [ + 309, + 583, + 547, + 596 + ], + "type": "text", + "content": " can be approximated as follows:" + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 330, + 603, + 563, + 640 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 330, + 603, + 563, + 640 + ], + "spans": [ + { + "bbox": [ + 330, + 603, + 563, + 640 + ], + "type": "interline_equation", + "content": "\\gamma_ {3} \\approx - \\sum_ {i = 1} ^ {\\frac {N}{N}} \\sum_ {n = 1} ^ {\\tilde {N} - 1} \\frac {4 n ^ {3} \\Delta_ {x} ^ {3}}{\\left(n ^ {2} \\Delta_ {x} ^ {2} + (i - 1) ^ {2} \\Delta_ {y} ^ {2} + d _ {\\mathrm {H}} ^ {2}\\right) ^ {3}}, \\tag {20}", + "image_path": "37093a2ca762a3380380c7f8cf3e0bb02c3ef35903771b9c7dd9cbf571d85434.jpg" + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 330, + 642, + 563, + 679 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 330, + 642, + 563, + 679 + ], + "spans": [ + { + "bbox": [ + 330, + 642, + 563, + 679 + ], + "type": "interline_equation", + "content": "\\gamma_ {4} \\approx \\sum_ {i = 1} ^ {\\frac {N}{N} - 1} \\sum_ {n = 1} ^ {\\tilde {N}} \\frac {i ^ {2} \\Delta_ {y} ^ {2}}{\\left((n - 1) ^ {2} \\Delta_ {x} ^ {2} + i ^ {2} \\Delta_ {y} ^ {2} + d _ {\\mathrm {H}} ^ {2}\\right) ^ 
{2}}, \\tag {21}", + "image_path": "c09adac5029f3bb246ac669f5737eb95ab3d1703a013baaefd01eeba05fdbb53.jpg" + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 330, + 681, + 563, + 717 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 330, + 681, + 563, + 717 + ], + "spans": [ + { + "bbox": [ + 330, + 681, + 563, + 717 + ], + "type": "interline_equation", + "content": "\\gamma_ {5} \\approx - \\sum_ {i = 1} ^ {\\frac {N}{N} - 1} \\sum_ {n = 1} ^ {\\tilde {N} - 1} \\frac {n \\Delta_ {x} i ^ {2} \\Delta_ {y} ^ {2}}{\\left(n ^ {2} \\Delta_ {x} ^ {2} + i \\Delta_ {y} ^ {2} + d _ {\\mathrm {H}} ^ {2}\\right) ^ {3}}. \\tag {22}", + "image_path": "e87b58de530b765bf6e415c59ae2d58cb9d534917313e5b61bd88443bf0d9437.jpg" + } + ] + } + ], + "index": 26 + }, + { + "bbox": [ + 308, + 724, + 564, + 751 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 724, + 564, + 751 + ], + "spans": [ + { + "bbox": [ + 308, + 724, + 564, + 751 + ], + "type": "text", + "content": "To facilitate the analysis of this local-maximum property of CRLB, assume that " + }, + { + "bbox": [ + 308, + 724, + 564, + 751 + ], + "type": "inline_equation", + "content": "\\Delta_x = \\Delta_y = \\Delta \\gg d_{\\mathrm{H}}" + }, + { + "bbox": [ + 308, + 724, + 564, + 751 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 308, + 724, + 564, + 751 + ], + "type": "inline_equation", + "content": "\\tilde{N} = \\frac{N}{\\tilde{N}}" + }, + { + "bbox": [ + 308, + 724, + 564, + 751 + ], + "type": "text", + "content": ", which" + } + ] + } + ], + "index": 27 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 558, + 24, + 563, + 32 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 558, + 24, + 563, + 32 + ], + "spans": [ + { + "bbox": [ + 558, + 24, + 563, + 32 + ], + "type": "text", + "content": "3" + } + ] + } + ], + "index": 0 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 2 + }, + { + "para_blocks": [ + { + 
"bbox": [ + 45, + 55, + 301, + 78 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 55, + 301, + 78 + ], + "spans": [ + { + "bbox": [ + 45, + 55, + 301, + 78 + ], + "type": "text", + "content": "means that " + }, + { + "bbox": [ + 45, + 55, + 301, + 78 + ], + "type": "inline_equation", + "content": "\\gamma_{1} = \\gamma_{3}" + }, + { + "bbox": [ + 45, + 55, + 301, + 78 + ], + "type": "text", + "content": ", and hence the CRLB can be simplified as follows:" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 52, + 80, + 294, + 160 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 80, + 294, + 160 + ], + "spans": [ + { + "bbox": [ + 52, + 80, + 294, + 160 + ], + "type": "interline_equation", + "content": "\\begin{array}{l} \\frac {\\partial \\mathrm {C R B} _ {m}}{\\partial x _ {m}} \\approx \\frac {\\gamma_ {1} K _ {E}}{(2 K _ {E} + 1)} \\left[ - \\sum_ {i = 1} ^ {\\frac {N}{N}} \\frac {2 \\delta_ {x}}{(\\delta^ {2} + (i - 1) ^ {2} \\Delta^ {2} + d _ {\\mathrm {H}} ^ {2}) ^ {2}} \\right. \\\\ \\left. 
+ \\sum_ {i = 1} ^ {\\frac {N}{N}} \\sum_ {n = 1} ^ {\\tilde {N} - 1} \\frac {2 n \\Delta}{\\bar {\\beta} _ {n i} ^ {2}} - \\sum_ {i = 1} ^ {\\frac {N}{N}} \\sum_ {n = 1} ^ {\\tilde {N} - 1} \\frac {4 n ^ {3} \\Delta^ {3}}{\\bar {\\beta} _ {n i} ^ {3}} - \\sum_ {i = 1} ^ {\\frac {N}{N} - 1} \\sum_ {n = 1} ^ {\\tilde {N} - 1} \\frac {4 n ^ {3} \\Delta^ {3}}{\\bar {\\beta} _ {n i} ^ {3}} \\right], \\\\ \\end{array}", + "image_path": "966213d6c77ba0bd668393a5c102d36667fbb01f2944f19727b0f886aacdb2d4.jpg" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 45, + 162, + 207, + 176 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 162, + 207, + 176 + ], + "spans": [ + { + "bbox": [ + 45, + 162, + 207, + 176 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 45, + 162, + 207, + 176 + ], + "type": "inline_equation", + "content": "\\bar{\\beta}_{ni} = (n^2 + (i - 1)^2)\\Delta^2 + d_{\\mathrm{H}}^2" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 45, + 175, + 301, + 217 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 175, + 301, + 217 + ], + "spans": [ + { + "bbox": [ + 45, + 175, + 301, + 217 + ], + "type": "text", + "content": "Note that if " + }, + { + "bbox": [ + 45, + 175, + 301, + 217 + ], + "type": "inline_equation", + "content": "i = \\frac{N}{N}" + }, + { + "bbox": [ + 45, + 175, + 301, + 217 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 45, + 175, + 301, + 217 + ], + "type": "inline_equation", + "content": "\\sum_{n=1}^{\\tilde{N}-1} \\frac{4n^3\\Delta^3}{\\left((n^2+(i-1)^2)\\Delta^2+d_{\\mathrm{H}}^2\\right)^3}" + }, + { + "bbox": [ + 45, + 175, + 301, + 217 + ], + "type": "text", + "content": " is an insignificant term, which means that the CRLB can be further simplified as follows:" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 52, + 220, + 300, + 301 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 220, + 300, + 301 + ], + 
"spans": [ + { + "bbox": [ + 52, + 220, + 300, + 301 + ], + "type": "interline_equation", + "content": "\\begin{array}{l} \\frac {\\partial \\mathrm {C R B} _ {m}}{\\partial x _ {m}} \\approx \\frac {\\gamma_ {1} K _ {E}}{(2 K _ {E} + 1)} \\left[ - \\sum_ {i = 1} ^ {\\frac {N}{N}} \\frac {2 \\delta_ {x}}{(\\delta^ {2} + (i - 1) ^ {2} \\Delta^ {2} + d _ {\\mathrm {H}} ^ {2}) ^ {2}} \\right. \\\\ \\left. + 2 \\Delta \\sum_ {i = 1} ^ {\\frac {N}{N}} \\sum_ {n = 1} ^ {\\tilde {N} - 1} \\frac {n \\left(\\left((i - 1) ^ {2} - 3 n ^ {2}\\right) \\Delta^ {2} + d _ {\\mathrm {H}} ^ {2}\\right)}{\\left(\\left(n ^ {2} + (i - 1) ^ {2}\\right) \\Delta^ {2} + d _ {\\mathrm {H}} ^ {2}\\right) ^ {3}} \\right]. \\tag {23} \\\\ \\end{array}", + "image_path": "247c87c8019459213bfe0ce6435a498ac3d201e36b5445abfef7c87b74001d6d.jpg" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 45, + 303, + 301, + 350 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 303, + 301, + 350 + ], + "spans": [ + { + "bbox": [ + 45, + 303, + 301, + 350 + ], + "type": "text", + "content": "For the case with " + }, + { + "bbox": [ + 45, + 303, + 301, + 350 + ], + "type": "inline_equation", + "content": "\\delta_x = 0" + }, + { + "bbox": [ + 45, + 303, + 301, + 350 + ], + "type": "text", + "content": ", i.e., the user is located right underneath of the pinching antenna at " + }, + { + "bbox": [ + 45, + 303, + 301, + 350 + ], + "type": "inline_equation", + "content": "\\psi_{11}^{\\mathrm{Pin}}" + }, + { + "bbox": [ + 45, + 303, + 301, + 350 + ], + "type": "text", + "content": ", by using the assumption that " + }, + { + "bbox": [ + 45, + 303, + 301, + 350 + ], + "type": "inline_equation", + "content": "\\Delta \\gg d" + }, + { + "bbox": [ + 45, + 303, + 301, + 350 + ], + "type": "text", + "content": ", the CRLB can be expressed as follows:" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 110, + 350, + 299, + 377 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + 
"bbox": [ + 110, + 350, + 299, + 377 + ], + "spans": [ + { + "bbox": [ + 110, + 350, + 299, + 377 + ], + "type": "interline_equation", + "content": "\\frac {\\partial \\mathrm {C R B} _ {m}}{\\partial x _ {m}} \\approx \\frac {\\gamma_ {1} K _ {E}}{(2 K _ {E} + 1)} \\frac {2}{\\Delta^ {3}} \\gamma_ {6}, \\tag {24}", + "image_path": "14299fc635244dedb6a0531d4d4b05a4d9af566abd191463f2ee7da4fce95e15.jpg" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 45, + 380, + 301, + 469 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 380, + 301, + 469 + ], + "spans": [ + { + "bbox": [ + 45, + 380, + 301, + 469 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 45, + 380, + 301, + 469 + ], + "type": "inline_equation", + "content": "\\gamma_6 = \\sum_{i=1}^{N} \\sum_{n=1}^{\\tilde{N}-1} \\frac{(i-1)^2 - 3n^2}{(n^2 + (i-1)^2)^3}" + }, + { + "bbox": [ + 45, + 380, + 301, + 469 + ], + "type": "text", + "content": ". We note that the terms of " + }, + { + "bbox": [ + 45, + 380, + 301, + 469 + ], + "type": "inline_equation", + "content": "\\gamma_6" + }, + { + "bbox": [ + 45, + 380, + 301, + 469 + ], + "type": "text", + "content": " decay rapidly by increasing " + }, + { + "bbox": [ + 45, + 380, + 301, + 469 + ], + "type": "inline_equation", + "content": "n" + }, + { + "bbox": [ + 45, + 380, + 301, + 469 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 45, + 380, + 301, + 469 + ], + "type": "inline_equation", + "content": "i" + }, + { + "bbox": [ + 45, + 380, + 301, + 469 + ], + "type": "text", + "content": ", i.e., " + }, + { + "bbox": [ + 45, + 380, + 301, + 469 + ], + "type": "inline_equation", + "content": "\\gamma_6" + }, + { + "bbox": [ + 45, + 380, + 301, + 469 + ], + "type": "text", + "content": " can be approximated by keeping the dominant negative term (" + }, + { + "bbox": [ + 45, + 380, + 301, + 469 + ], + "type": "inline_equation", + "content": "n = 1" + }, + { + "bbox": [ + 45, + 380, + 301, + 469 + ], + 
"type": "text", + "content": " and " + }, + { + "bbox": [ + 45, + 380, + 301, + 469 + ], + "type": "inline_equation", + "content": "i = 1" + }, + { + "bbox": [ + 45, + 380, + 301, + 469 + ], + "type": "text", + "content": ") and the dominant positive term (" + }, + { + "bbox": [ + 45, + 380, + 301, + 469 + ], + "type": "inline_equation", + "content": "n = 1" + }, + { + "bbox": [ + 45, + 380, + 301, + 469 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 45, + 380, + 301, + 469 + ], + "type": "inline_equation", + "content": "i = 3" + }, + { + "bbox": [ + 45, + 380, + 301, + 469 + ], + "type": "text", + "content": "), i.e., " + }, + { + "bbox": [ + 45, + 380, + 301, + 469 + ], + "type": "inline_equation", + "content": "\\gamma_6 \\approx -3 + \\frac{1}{125}" + }, + { + "bbox": [ + 45, + 380, + 301, + 469 + ], + "type": "text", + "content": ", which means " + }, + { + "bbox": [ + 45, + 380, + 301, + 469 + ], + "type": "inline_equation", + "content": "\\frac{\\partial \\mathrm{CRB}_m}{\\partial x_m} \\leq 0" + }, + { + "bbox": [ + 45, + 380, + 301, + 469 + ], + "type": "text", + "content": " for the case with " + }, + { + "bbox": [ + 45, + 380, + 301, + 469 + ], + "type": "inline_equation", + "content": "\\delta_x = 0" + }, + { + "bbox": [ + 45, + 380, + 301, + 469 + ], + "type": "text", + "content": ". 
For the case of " + }, + { + "bbox": [ + 45, + 380, + 301, + 469 + ], + "type": "inline_equation", + "content": "\\delta_x \\neq 0" + }, + { + "bbox": [ + 45, + 380, + 301, + 469 + ], + "type": "text", + "content": ", the CRLB can be approximated as follows:" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 85, + 472, + 261, + 499 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 85, + 472, + 261, + 499 + ], + "spans": [ + { + "bbox": [ + 85, + 472, + 261, + 499 + ], + "type": "interline_equation", + "content": "\\frac {\\partial \\mathrm {C R B} _ {m}}{\\partial x _ {m}} \\approx \\frac {\\gamma_ {1} K _ {E}}{(2 K _ {E} + 1)} \\left[ - \\frac {2 \\delta_ {x}}{d _ {\\mathrm {H}} ^ {4}} + \\frac {2}{\\Delta^ {3}} \\gamma_ {6} \\right].", + "image_path": "746490cd069ab9d9602ce83c59d0e2cbee3542393c6445337af101da905659a7.jpg" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 45, + 502, + 301, + 581 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 502, + 301, + 581 + ], + "spans": [ + { + "bbox": [ + 45, + 502, + 301, + 581 + ], + "type": "text", + "content": "Due to the assumption of " + }, + { + "bbox": [ + 45, + 502, + 301, + 581 + ], + "type": "inline_equation", + "content": "\\Delta \\gg d_{\\mathrm{H}}" + }, + { + "bbox": [ + 45, + 502, + 301, + 581 + ], + "type": "text", + "content": " , the term " + }, + { + "bbox": [ + 45, + 502, + 301, + 581 + ], + "type": "inline_equation", + "content": "\\frac{2\\delta_x}{d_{\\mathrm{H}}^4}" + }, + { + "bbox": [ + 45, + 502, + 301, + 581 + ], + "type": "text", + "content": " is dominant, and hence " + }, + { + "bbox": [ + 45, + 502, + 301, + 581 + ], + "type": "inline_equation", + "content": "\\frac{\\partial\\mathrm{CRB}_m}{\\partial x_m} >0" + }, + { + "bbox": [ + 45, + 502, + 301, + 581 + ], + "type": "text", + "content": " if " + }, + { + "bbox": [ + 45, + 502, + 301, + 581 + ], + "type": "inline_equation", + "content": "\\delta_{x} < 0" + }, + { + "bbox": [ 
+ 45, + 502, + 301, + 581 + ], + "type": "text", + "content": " . In summary, " + }, + { + "bbox": [ + 45, + 502, + 301, + 581 + ], + "type": "inline_equation", + "content": "\\frac{\\partial\\mathrm{CRB}_m}{\\partial x_m} < 0" + }, + { + "bbox": [ + 45, + 502, + 301, + 581 + ], + "type": "text", + "content": " if the user's location is " + }, + { + "bbox": [ + 45, + 502, + 301, + 581 + ], + "type": "inline_equation", + "content": "(x_{11}^{\\mathrm{Pin}},y_{11}^{\\mathrm{Pin}},0)" + }, + { + "bbox": [ + 45, + 502, + 301, + 581 + ], + "type": "text", + "content": " , and " + }, + { + "bbox": [ + 45, + 502, + 301, + 581 + ], + "type": "inline_equation", + "content": "\\frac{\\partial\\mathrm{CRB}_m}{\\partial x_m} >0" + }, + { + "bbox": [ + 45, + 502, + 301, + 581 + ], + "type": "text", + "content": " if the user's location is " + }, + { + "bbox": [ + 45, + 502, + 301, + 581 + ], + "type": "inline_equation", + "content": "(x_{11}^{\\mathrm{Pin}} + \\delta_x,y_{11}^{\\mathrm{Pin}} + \\delta_y,d_{\\mathrm{H}})" + }, + { + "bbox": [ + 45, + 502, + 301, + 581 + ], + "type": "text", + "content": " . 
A similar conclusion can be established to " + }, + { + "bbox": [ + 45, + 502, + 301, + 581 + ], + "type": "inline_equation", + "content": "\\frac{\\partial\\mathrm{CRB}_m}{\\partial y_m}" + }, + { + "bbox": [ + 45, + 502, + 301, + 581 + ], + "type": "text", + "content": " , which means that there exists a local maximum for the CRLB around " + }, + { + "bbox": [ + 45, + 502, + 301, + 581 + ], + "type": "inline_equation", + "content": "\\psi_{n}^{\\mathrm{Pin}}" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 45, + 582, + 301, + 677 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 582, + 301, + 677 + ], + "spans": [ + { + "bbox": [ + 45, + 582, + 301, + 677 + ], + "type": "text", + "content": "Remark 3: The local maximum property of the CRLB indicates an interesting conflict between the communication and sensing functionalities of pinching antennas. In particular, placing a pinching antenna directly above the user might increase the user's data rate but also degrade positioning accuracy. In other words, this local maximum property reveals the importance of antenna placement in pinching-antenna assisted ISAC networks." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 116, + 685, + 230, + 696 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 116, + 685, + 230, + 696 + ], + "spans": [ + { + "bbox": [ + 116, + 685, + 230, + 696 + ], + "type": "text", + "content": "IV. 
NUMERICAL STUDIES" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 45, + 700, + 301, + 748 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 700, + 301, + 748 + ], + "spans": [ + { + "bbox": [ + 45, + 700, + 301, + 748 + ], + "type": "text", + "content": "In this section, computer simulation results are presented to demonstrate the impact of pinching antennas on the positioning accuracy, where " + }, + { + "bbox": [ + 45, + 700, + 301, + 748 + ], + "type": "inline_equation", + "content": "K_{E} = 0.01" + }, + { + "bbox": [ + 45, + 700, + 301, + 748 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 45, + 700, + 301, + 748 + ], + "type": "inline_equation", + "content": "D_{\\mathrm{W}} = 10 \\mathrm{~m}" + }, + { + "bbox": [ + 45, + 700, + 301, + 748 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 45, + 700, + 301, + 748 + ], + "type": "inline_equation", + "content": "D_{\\mathrm{L}} = 40 \\mathrm{~m}" + }, + { + "bbox": [ + 45, + 700, + 301, + 748 + ], + "type": "text", + "content": ", unless stated otherwise." + } + ] + } + ], + "index": 13 + }, + { + "type": "image", + "bbox": [ + 351, + 52, + 524, + 186 + ], + "blocks": [ + { + "bbox": [ + 351, + 52, + 524, + 186 + ], + "lines": [ + { + "bbox": [ + 351, + 52, + 524, + 186 + ], + "spans": [ + { + "bbox": [ + 351, + 52, + 524, + 186 + ], + "type": "image", + "image_path": "de2fd461e68d196df1294074a08a6e8f1ae03b5be4657fc9a3ee1fe1bcd21315.jpg" + } + ] + } + ], + "index": 14, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 308, + 189, + 565, + 247 + ], + "lines": [ + { + "bbox": [ + 308, + 189, + 565, + 247 + ], + "spans": [ + { + "bbox": [ + 308, + 189, + 565, + 247 + ], + "type": "text", + "content": "Fig. 1. 
Averaged CRLBs, " + }, + { + "bbox": [ + 308, + 189, + 565, + 247 + ], + "type": "inline_equation", + "content": "\\mathrm{CRB}_m" + }, + { + "bbox": [ + 308, + 189, + 565, + 247 + ], + "type": "text", + "content": ", achieved by the considered antenna systems, where " + }, + { + "bbox": [ + 308, + 189, + 565, + 247 + ], + "type": "inline_equation", + "content": "N_{\\mathrm{WG}} = 2" + }, + { + "bbox": [ + 308, + 189, + 565, + 247 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 308, + 189, + 565, + 247 + ], + "type": "inline_equation", + "content": "d = 3\\mathrm{m}" + }, + { + "bbox": [ + 308, + 189, + 565, + 247 + ], + "type": "text", + "content": ". For the pinching-antenna system, on each waveguide, there are " + }, + { + "bbox": [ + 308, + 189, + 565, + 247 + ], + "type": "inline_equation", + "content": "\\frac{N}{N_{\\mathrm{WG}}}" + }, + { + "bbox": [ + 308, + 189, + 565, + 247 + ], + "type": "text", + "content": " antennas, which are equally spaced. Due to the singularity issue experienced by conventional antennas discussed in Section III-B1, users are assumed to be excluded from a square area with its side being " + }, + { + "bbox": [ + 308, + 189, + 565, + 247 + ], + "type": "inline_equation", + "content": "a" + }, + { + "bbox": [ + 308, + 189, + 565, + 247 + ], + "type": "text", + "content": " and its center at the origin." 
+ } + ] + } + ], + "index": 15, + "angle": 0, + "type": "image_caption" + } + ], + "index": 14 + }, + { + "type": "image", + "bbox": [ + 349, + 255, + 514, + 371 + ], + "blocks": [ + { + "bbox": [ + 349, + 255, + 514, + 371 + ], + "lines": [ + { + "bbox": [ + 349, + 255, + 514, + 371 + ], + "spans": [ + { + "bbox": [ + 349, + 255, + 514, + 371 + ], + "type": "image", + "image_path": "8c86d7c82be13cfdb700185fedbcfc043903bcacc188a31c96bd78c0d022b95d.jpg" + } + ] + } + ], + "index": 16, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 392, + 379, + 482, + 389 + ], + "lines": [ + { + "bbox": [ + 392, + 379, + 482, + 389 + ], + "spans": [ + { + "bbox": [ + 392, + 379, + 482, + 389 + ], + "type": "text", + "content": "(a) Conventional Antennas" + } + ] + } + ], + "index": 17, + "angle": 0, + "type": "image_caption" + } + ], + "index": 16 + }, + { + "type": "image", + "bbox": [ + 351, + 402, + 512, + 517 + ], + "blocks": [ + { + "bbox": [ + 351, + 402, + 512, + 517 + ], + "lines": [ + { + "bbox": [ + 351, + 402, + 512, + 517 + ], + "spans": [ + { + "bbox": [ + 351, + 402, + 512, + 517 + ], + "type": "image", + "image_path": "1471639b0f119a70aea449f2c23ef35cea4d5de252869d12349b3c54fafbf1c3.jpg" + } + ] + } + ], + "index": 18, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 397, + 525, + 473, + 535 + ], + "lines": [ + { + "bbox": [ + 397, + 525, + 473, + 535 + ], + "spans": [ + { + "bbox": [ + 397, + 525, + 473, + 535 + ], + "type": "text", + "content": "(b) Pinching Antennas" + } + ] + } + ], + "index": 19, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 308, + 534, + 563, + 564 + ], + "lines": [ + { + "bbox": [ + 308, + 534, + 563, + 564 + ], + "spans": [ + { + "bbox": [ + 308, + 534, + 563, + 564 + ], + "type": "text", + "content": "Fig. 2. CRLBs achieved by the considered antenna systems. 
" + }, + { + "bbox": [ + 308, + 534, + 563, + 564 + ], + "type": "inline_equation", + "content": "N = 20" + }, + { + "bbox": [ + 308, + 534, + 563, + 564 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 308, + 534, + 563, + 564 + ], + "type": "inline_equation", + "content": "N_{\\mathrm{WG}} = 2" + }, + { + "bbox": [ + 308, + 534, + 563, + 564 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 308, + 534, + 563, + 564 + ], + "type": "inline_equation", + "content": "d = 3 \\, \\mathrm{m}" + }, + { + "bbox": [ + 308, + 534, + 563, + 564 + ], + "type": "text", + "content": ". On each waveguide, there are " + }, + { + "bbox": [ + 308, + 534, + 563, + 564 + ], + "type": "inline_equation", + "content": "\\frac{N}{N_{\\mathrm{WG}}}" + }, + { + "bbox": [ + 308, + 534, + 563, + 564 + ], + "type": "text", + "content": " antennas, which are equally spaced." + } + ] + } + ], + "index": 20, + "angle": 0, + "type": "image_caption" + } + ], + "index": 18 + }, + { + "bbox": [ + 307, + 567, + 564, + 686 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 567, + 564, + 686 + ], + "spans": [ + { + "bbox": [ + 307, + 567, + 564, + 686 + ], + "type": "text", + "content": "In Fig. 1, the averaged CRLBs achieved by the conventional and pinching-antenna systems are shown as functions of the number of antennas, where " + }, + { + "bbox": [ + 307, + 567, + 564, + 686 + ], + "type": "inline_equation", + "content": "\\mathrm{U}_m" + }, + { + "bbox": [ + 307, + 567, + 564, + 686 + ], + "type": "text", + "content": " is assumed to be uniformly deployed in the service area. 
Because the conventional-antenna system suffers the singularity issue discussed in Section III-B1, it is assumed that " + }, + { + "bbox": [ + 307, + 567, + 564, + 686 + ], + "type": "inline_equation", + "content": "\\mathrm{U}_m" + }, + { + "bbox": [ + 307, + 567, + 564, + 686 + ], + "type": "text", + "content": " cannot be located in a square area with its side being " + }, + { + "bbox": [ + 307, + 567, + 564, + 686 + ], + "type": "inline_equation", + "content": "a" + }, + { + "bbox": [ + 307, + 567, + 564, + 686 + ], + "type": "text", + "content": " and its center at the origin. As can be seen from Fig. 1, the use of pinching antennas yields a significant performance gain over conventional antennas, regardless of the choices of " + }, + { + "bbox": [ + 307, + 567, + 564, + 686 + ], + "type": "inline_equation", + "content": "N" + }, + { + "bbox": [ + 307, + 567, + 564, + 686 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 307, + 567, + 564, + 686 + ], + "type": "inline_equation", + "content": "a" + }, + { + "bbox": [ + 307, + 567, + 564, + 686 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 307, + 689, + 564, + 749 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 689, + 564, + 749 + ], + "spans": [ + { + "bbox": [ + 307, + 689, + 564, + 749 + ], + "type": "text", + "content": "Fig. 2 is provided to highlight the fact that a user's positioning accuracy depends on its location. On the one hand, Fig. 
2(a) shows that for conventional antennas, a user can experience extremely poor positioning accuracy if it is located far away from the center of the service area, which" + } + ] + } + ], + "index": 22 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 558, + 25, + 563, + 32 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 558, + 25, + 563, + 32 + ], + "spans": [ + { + "bbox": [ + 558, + 25, + 563, + 32 + ], + "type": "text", + "content": "4" + } + ] + } + ], + "index": 0 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 3 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 89, + 41, + 252, + 156 + ], + "blocks": [ + { + "bbox": [ + 89, + 41, + 252, + 156 + ], + "lines": [ + { + "bbox": [ + 89, + 41, + 252, + 156 + ], + "spans": [ + { + "bbox": [ + 89, + 41, + 252, + 156 + ], + "type": "image", + "image_path": "214063bb72dbd9a11d4eb4e79e473fdc96c3ca35a008baf61c81991492ed251a.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 88, + 163, + 257, + 178 + ], + "lines": [ + { + "bbox": [ + 88, + 163, + 257, + 178 + ], + "spans": [ + { + "bbox": [ + 88, + 163, + 257, + 178 + ], + "type": "text", + "content": "(a) Positioning with a focal point at " + }, + { + "bbox": [ + 88, + 163, + 257, + 178 + ], + "type": "inline_equation", + "content": "\\left(-\\frac{D_{\\mathrm{L}}}{4},0,0\\right)" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_caption" + } + ], + "index": 1 + }, + { + "type": "image", + "bbox": [ + 88, + 190, + 249, + 305 + ], + "blocks": [ + { + "bbox": [ + 88, + 190, + 249, + 305 + ], + "lines": [ + { + "bbox": [ + 88, + 190, + 249, + 305 + ], + "spans": [ + { + "bbox": [ + 88, + 190, + 249, + 305 + ], + "type": "image", + "image_path": "5959a4483a3d708e1fda07cb15e0ba8ae7a2653fca446613aace2299d7205a6c.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 91, + 312, + 252, + 327 + ], + "lines": [ + { + "bbox": [ + 
91, + 312, + 252, + 327 + ], + "spans": [ + { + "bbox": [ + 91, + 312, + 252, + 327 + ], + "type": "text", + "content": "(b) Positioning with a focal point at " + }, + { + "bbox": [ + 91, + 312, + 252, + 327 + ], + "type": "inline_equation", + "content": "\\left(\\frac{D_{\\mathrm{L}}}{4},0,0\\right)" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 45, + 327, + 301, + 368 + ], + "lines": [ + { + "bbox": [ + 45, + 327, + 301, + 368 + ], + "spans": [ + { + "bbox": [ + 45, + 327, + 301, + 368 + ], + "type": "text", + "content": "Fig. 3. Using pinching antennas to achieve flexible user-centric positioning. " + }, + { + "bbox": [ + 45, + 327, + 301, + 368 + ], + "type": "inline_equation", + "content": "N = 20" + }, + { + "bbox": [ + 45, + 327, + 301, + 368 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 45, + 327, + 301, + 368 + ], + "type": "inline_equation", + "content": "N_{\\mathrm{WG}} = 2" + }, + { + "bbox": [ + 45, + 327, + 301, + 368 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 45, + 327, + 301, + 368 + ], + "type": "inline_equation", + "content": "d = 3 \\mathrm{~m}" + }, + { + "bbox": [ + 45, + 327, + 301, + 368 + ], + "type": "text", + "content": ". On each waveguide, there are " + }, + { + "bbox": [ + 45, + 327, + 301, + 368 + ], + "type": "inline_equation", + "content": "\\frac{N}{N_{\\mathrm{WG}}}" + }, + { + "bbox": [ + 45, + 327, + 301, + 368 + ], + "type": "text", + "content": " antennas, which are equally spaced in a segment with its length being " + }, + { + "bbox": [ + 45, + 327, + 301, + 368 + ], + "type": "inline_equation", + "content": "\\frac{D_{\\mathrm{L}}}{2}" + }, + { + "bbox": [ + 45, + 327, + 301, + 368 + ], + "type": "text", + "content": " and its center at the focal points shown in the figures." 
+ } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_caption" + } + ], + "index": 3 + }, + { + "bbox": [ + 45, + 369, + 300, + 475 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 369, + 300, + 475 + ], + "spans": [ + { + "bbox": [ + 45, + 369, + 300, + 475 + ], + "type": "text", + "content": "confirms the analytical results shown in (10). On the other hand, Fig. 2(b) shows that the use of pinching antennas ensures reasonably accurate positioning, regardless of whether the user is at the center or the edge of the service area. This also means that for the multi-user scenario, using pinching antennas can ensure fairness for the users' positioning accuracy. We note that in Fig. 2(b), local maximums are clearly visible in the proximity of the pinching antennas, which confirms the analysis shown in Section III-B3." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 45, + 476, + 301, + 631 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 476, + 301, + 631 + ], + "spans": [ + { + "bbox": [ + 45, + 476, + 301, + 631 + ], + "type": "text", + "content": "Recall that one key feature of pinching antennas is their reconfiguration capabilities, where the number and the locations of the antennas can be changed in a flexible manner. Fig. 3 demonstrates how this reconfiguration feature can be used to achieve flexible user-centric positioning. In particular, Figs. 3(a) and 3(b) show that by activating the pinching antennas close to the intended user locations, different focal points can be realized, which means that users close to these focal points can enjoy high positioning accuracy. For the case where the pinching antennas are clustered close to a user, Fig. 4 is provided to show the impact of the antenna spacing on the CRLB, where the accuracy of the analytical results developed in (16) is also verified." 
+ } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 133, + 637, + 212, + 648 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 133, + 637, + 212, + 648 + ], + "spans": [ + { + "bbox": [ + 133, + 637, + 212, + 648 + ], + "type": "text", + "content": "V. CONCLUSIONS" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 45, + 652, + 301, + 750 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 652, + 301, + 750 + ], + "spans": [ + { + "bbox": [ + 45, + 652, + 301, + 750 + ], + "type": "text", + "content": "This letter investigated how the key features of pinching antennas can be used to support ISAC from the CRLB perspective. In particular, the CRLB achieved by pinching antennas was first derived and then compared to that of conventional antennas. The presented analytical and simulation results demonstrated that the use of pinching antennas can significantly reduce CRLB and, hence, enhance the sensing capability. In addition, this letter showed that the low-cost and" + } + ] + } + ], + "index": 9 + }, + { + "type": "image", + "bbox": [ + 351, + 32, + 525, + 167 + ], + "blocks": [ + { + "bbox": [ + 351, + 32, + 525, + 167 + ], + "lines": [ + { + "bbox": [ + 351, + 32, + 525, + 167 + ], + "spans": [ + { + "bbox": [ + 351, + 32, + 525, + 167 + ], + "type": "image", + "image_path": "95e52804193e9fcc17868d063866468da9677490c21f4494ad8893fb6d18da17.jpg" + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 308, + 171, + 564, + 208 + ], + "lines": [ + { + "bbox": [ + 308, + 171, + 564, + 208 + ], + "spans": [ + { + "bbox": [ + 308, + 171, + 564, + 208 + ], + "type": "text", + "content": "Fig. 4. Impact of the antenna spacing on the CRLB. 
" + }, + { + "bbox": [ + 308, + 171, + 564, + 208 + ], + "type": "inline_equation", + "content": "N = 4" + }, + { + "bbox": [ + 308, + 171, + 564, + 208 + ], + "type": "text", + "content": " pinching antennas are activated in a square-shape area with the antenna spacing being " + }, + { + "bbox": [ + 308, + 171, + 564, + 208 + ], + "type": "inline_equation", + "content": "\\Delta" + }, + { + "bbox": [ + 308, + 171, + 564, + 208 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 308, + 171, + 564, + 208 + ], + "type": "inline_equation", + "content": "\\mathrm{U}_m" + }, + { + "bbox": [ + 308, + 171, + 564, + 208 + ], + "type": "text", + "content": " located at the center of the area, where " + }, + { + "bbox": [ + 308, + 171, + 564, + 208 + ], + "type": "inline_equation", + "content": "N_{\\mathrm{WG}} = 2" + }, + { + "bbox": [ + 308, + 171, + 564, + 208 + ], + "type": "text", + "content": ". The analytical results are based on (16)." + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "image_caption" + } + ], + "index": 10 + }, + { + "bbox": [ + 308, + 213, + 564, + 237 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 213, + 564, + 237 + ], + "spans": [ + { + "bbox": [ + 308, + 213, + 564, + 237 + ], + "type": "text", + "content": "reconfigurability features of pinching antennas can be utilized to realize flexible user-centric positioning." 
+ } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 406, + 243, + 466, + 252 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 406, + 243, + 466, + 252 + ], + "spans": [ + { + "bbox": [ + 406, + 243, + 466, + 252 + ], + "type": "text", + "content": "REFERENCES" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 310, + 258, + 564, + 734 + ], + "type": "list", + "angle": 0, + "index": 31, + "blocks": [ + { + "bbox": [ + 315, + 258, + 564, + 285 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 258, + 564, + 285 + ], + "spans": [ + { + "bbox": [ + 315, + 258, + 564, + 285 + ], + "type": "text", + "content": "[1] A. Fukuda, H. Yamamoto, H. Okazaki, Y. Suzuki, and K. Kawai, \"Pinching antenna - using a dielectric waveguide as an antenna,\" NTT DOCOMO Technical J., vol. 23, no. 3, pp. 5-12, Jan. 2022." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 315, + 285, + 563, + 312 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 285, + 563, + 312 + ], + "spans": [ + { + "bbox": [ + 315, + 285, + 563, + 312 + ], + "type": "text", + "content": "[2] Z. Ding, R. Schober, and H. V. Poor, \"Flexible-antenna systems: A pinching-antenna perspective,\" IEEE Trans. Commun., (to appear in 2025) Available on-line at arXiv:2412.02376." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 314, + 312, + 563, + 338 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 312, + 563, + 338 + ], + "spans": [ + { + "bbox": [ + 314, + 312, + 563, + 338 + ], + "type": "text", + "content": "[3] Z. Ding and H. V. Poor, “Los blockage in pinching-antenna systems: Curse or blessing?” IEEE Wireless Commun. Lett., (submitted) Available on-line at arXiv:2503.08554." 
+ } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 314, + 339, + 563, + 366 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 339, + 563, + 366 + ], + "spans": [ + { + "bbox": [ + 314, + 339, + 563, + 366 + ], + "type": "text", + "content": "[4] K. Wang, Z. Ding, and R. Schober, \"Antenna activation for NOMA assisted pinching-antenna systems,\" IEEE Commun. Lett., (to appear in 2025) Available on-line at arXiv:2412.13969." + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 314, + 366, + 563, + 392 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 366, + 563, + 392 + ], + "spans": [ + { + "bbox": [ + 314, + 366, + 563, + 392 + ], + "type": "text", + "content": "[5] C. Ouyang, Z. Wang, Y. Liu, and Z. Ding, \"Array gain for pinching-antenna systems (PASS),\" IEEE Commun. Lett., (submitted) Available on-line at arXiv:2501.05657." + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 314, + 392, + 563, + 420 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 392, + 563, + 420 + ], + "spans": [ + { + "bbox": [ + 314, + 392, + 563, + 420 + ], + "type": "text", + "content": "[6] Z. Wang, C. Ouyang, X. Mu, Y. Liu, and Z. Ding, \"Modeling and beamforming optimization for pinching-antenna systems,\" IEEE Trans. Wireless Commun., (submitted) Available on-line at arXiv:2502.05917." + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 314, + 420, + 563, + 446 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 420, + 563, + 446 + ], + "spans": [ + { + "bbox": [ + 314, + 420, + 563, + 446 + ], + "type": "text", + "content": "[7] Y. Xu, Z. Ding, and G. Karagiannidis, \"Rate maximization for downlink pinching-antenna systems,\" IEEE Commun. Lett., (to appear in 2025) Available on-line at arXiv:2502.12629." 
+ } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 314, + 447, + 563, + 474 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 447, + 563, + 474 + ], + "spans": [ + { + "bbox": [ + 314, + 447, + 563, + 474 + ], + "type": "text", + "content": "[8] X. Mu, G. Zhu, and Y. Liu, \"Pinching-antenna system (PASS)-enabled multicast communications,\" IEEE Trans. Commun., (submitted) Available on-line at arXiv:2502.16624." + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 314, + 474, + 563, + 500 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 474, + 563, + 500 + ], + "spans": [ + { + "bbox": [ + 314, + 474, + 563, + 500 + ], + "type": "text", + "content": "[9] J. Xiao, J. Wang, and Y. Liu, \"Channel estimation for pinching-antenna systems (PASS),\" IEEE Trans. Commun., (submitted) Available on-line at arXiv:2503.13268." + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 310, + 501, + 563, + 527 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 501, + 563, + 527 + ], + "spans": [ + { + "bbox": [ + 310, + 501, + 563, + 527 + ], + "type": "text", + "content": "[10] ——, “Beam training for pinching-antenna systems (PASS),” IEEE Trans. Wireless Commun., (submitted) Available on-line at arXiv:2502.05921." + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 310, + 527, + 563, + 553 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 527, + 563, + 553 + ], + "spans": [ + { + "bbox": [ + 310, + 527, + 563, + 553 + ], + "type": "text", + "content": "[11] X. Xie, Y. Lu, and Z. Ding, \"Graph neural network enabled pinching antennas,\" IEEE Commun. Lett., (submitted) Available on-line at arXiv:2502.05447." 
+ } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 310, + 554, + 563, + 581 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 554, + 563, + 581 + ], + "spans": [ + { + "bbox": [ + 310, + 554, + 563, + 581 + ], + "type": "text", + "content": "[12] J. Guo, Y. Liu, and A. Nallanathan, \"GPASS: Deep learning for beamforming in pinching-antenna systems (PASS),\" IEEE Commun. Lett., (submitted) Available on-line at arXiv:2502.01438." + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 310, + 582, + 563, + 616 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 582, + 563, + 616 + ], + "spans": [ + { + "bbox": [ + 310, + 582, + 563, + 616 + ], + "type": "text", + "content": "[13] S. A. Tegos, P. D. Diamantoulakis, Z. Ding, and G. K. Karagiannidis, \"Minimum data rate maximization for uplink pinching-antenna systems,\" IEEE Wireless Commun. Lett., (to appear in 2025) Available on-line at arXiv:2412.13892." + } + ] + } + ], + "index": 26 + }, + { + "bbox": [ + 310, + 617, + 563, + 643 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 617, + 563, + 643 + ], + "spans": [ + { + "bbox": [ + 310, + 617, + 563, + 643 + ], + "type": "text", + "content": "[14] M. Sun, C. Ouyang, S. Wu, and Y. Liu, \"Physical layer security for pinching-antenna systems (PASS),\" IEEE Commun. Lett., (submitted) Available on-line at arXiv:2503.09075." + } + ] + } + ], + "index": 27 + }, + { + "bbox": [ + 310, + 644, + 563, + 670 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 644, + 563, + 670 + ], + "spans": [ + { + "bbox": [ + 310, + 644, + 563, + 670 + ], + "type": "text", + "content": "[15] Y. Qin, Y. Fu, and H. Zhang, \"Joint antenna position and transmit power optimization for pinching antenna-assisted ISAC systems,\" IEEE Commun. Lett., (submitted) Available on-line at arXiv:2503.12872." 
+ } + ] + } + ], + "index": 28 + }, + { + "bbox": [ + 310, + 671, + 563, + 707 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 671, + 563, + 707 + ], + "spans": [ + { + "bbox": [ + 310, + 671, + 563, + 707 + ], + "type": "text", + "content": "[16] F. Liu, Y. Cui, C. Masouros, J. Xu, T. X. Han, Y. C. Eldar, and S. Buzzi, \"Integrated sensing and communications: Toward dual-functional wireless networks for 6G and beyond,\" IEEE J. Sel. Areas Commun., vol. 40, no. 6, pp. 1728-1767, 2022." + } + ] + } + ], + "index": 29 + }, + { + "bbox": [ + 310, + 708, + 563, + 734 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 708, + 563, + 734 + ], + "spans": [ + { + "bbox": [ + 310, + 708, + 563, + 734 + ], + "type": "text", + "content": "[17] T. Jia and R. M. Buehrer, “A new cramer-rao lower bound for TOA-based localization,” in Proc. Military Commun. Conf. (MILCOM 2008), Nov. 2008, pp. 1-5." + } + ] + } + ], + "index": 30 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 558, + 24, + 563, + 32 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 558, + 24, + 563, + 32 + ], + "spans": [ + { + "bbox": [ + 558, + 24, + 563, + 32 + ], + "type": "text", + "content": "5" + } + ] + } + ], + "index": 0 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 4 + } + ], + "_backend": "vlm", + "_version_name": "2.6.4" +} \ No newline at end of file diff --git a/data/2025/2504_05xxx/2504.05812/2ebd62c4-e647-47e7-bb58-1c94267578a3_content_list.json b/data/2025/2504_05xxx/2504.05812/2ebd62c4-e647-47e7-bb58-1c94267578a3_content_list.json new file mode 100644 index 0000000000000000000000000000000000000000..3d35856f4cec4f0db555f06cfa54319d0ba726d7 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05812/2ebd62c4-e647-47e7-bb58-1c94267578a3_content_list.json @@ -0,0 +1,2521 @@ +[ + { + "type": "text", + "text": "Right Question is Already Half the Answer: Fully Unsupervised LLM 
Reasoning Incentivization", + "text_level": 1, + "bbox": [ + 184, + 122, + 812, + 172 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Qingyang Zhang", + "bbox": [ + 240, + 224, + 364, + 239 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Tianjin University", + "bbox": [ + 240, + 241, + 362, + 255 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Haitao Wu", + "bbox": [ + 457, + 226, + 535, + 239 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Tianjin University", + "bbox": [ + 436, + 241, + 555, + 255 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Changqing Zhang", + "bbox": [ + 629, + 226, + 758, + 241 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Tianjin University", + "bbox": [ + 633, + 241, + 754, + 255 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Peilin Zhao", + "bbox": [ + 310, + 276, + 393, + 287 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Tencent AI Lab", + "bbox": [ + 300, + 290, + 403, + 301 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Yatao Bian", + "bbox": [ + 578, + 276, + 656, + 287 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Tencent AI Lab & NUS", + "bbox": [ + 539, + 290, + 696, + 301 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Abstract", + "text_level": 1, + "bbox": [ + 459, + 339, + 537, + 356 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Existing methods to enhance the reasoning capability of large language models predominantly rely on supervised fine-tuning (SFT) followed by reinforcement learning (RL) on reasoning-specific data. These approaches critically depend on external supervisions—such as labeled reasoning traces, verified golden answers, or pre-trained reward models. In this work, we propose Entropy Minimized Policy Optimization (EMPO), which makes an early attempt at fully unsupervised LLM reasoning incentivization. 
By continuously minimizing the predictive entropy of LLMs on unlabeled questions in a latent semantic space, EMP0 achieves competitive performance compared to supervised counterparts on both mathematical and freeform natural reasoning tasks. Specifically, without any supervised signals, EMP0 boosts the accuracy of Qwen2.5-Math-7B Base from $30.7\\%$ to $48.1\\%$ on mathematical benchmarks and improves the accuracy of Qwen2.5-7B Base from $32.1\\%$ to $50.1\\%$ on MMLU-Pro. Primary experiments and analysis are also provided to interpret the effectiveness of EMP0. Code is available at this url.", + "bbox": [ + 228, + 369, + 767, + 564 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "1 Introduction", + "text_level": 1, + "bbox": [ + 171, + 588, + 313, + 603 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Large language models (LLMs) have demonstrated exceptional potential in challenging tasks such as mathematical reasoning [1, 2, 3] and code generation [4]. A prevailing paradigm for training reasoning LLMs involves firstly performing supervised fine-tuning (SFT) and then reinforcement learning (RL), or iterative combinations of both, applied to reasoning-specific datasets after pretraining [5]. Unfortunately, these methods typically depend on large-scale reasoning datasets with various forms of supervised information, such as human-labeled reasoning traces, verified golden answers, or an additional pre-trained re", + "bbox": [ + 169, + 618, + 500, + 797 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "ward model. 
As a consequence, endowing LLMs with powerful reasoning capability through human experts is becoming increasingly time-consuming and costly, which greatly limits the scalability and broader adoption of reasoning models.", + "bbox": [ + 169, + 797, + 823, + 840 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "To mitigate this, previous work employs self-consistency to construct pseudo data and deploy supervised finetuning for better performance [6]. However, the performance improvement is limited and under risks of model collapse [7]. Recent advancements, such as the pioneering work PFPO [8], frame the labeling of solutions as evaluation against test cases and then leverage self-consistency", + "bbox": [ + 169, + 845, + 826, + 902 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/02778710b12d49b556848c17b7b983ca69c0dffbf0dd66e3238c493a9a9401fe.jpg", + "image_caption": [ + "Figure 1: Improvement of the proposed method on Qwen2.5-7B and Qwen2.5-7B-Math model." + ], + "image_footnote": [], + "bbox": [ + 526, + 638, + 802, + 755 + ], + "page_idx": 0 + }, + { + "type": "aside_text", + "text": "arXiv:2504.05812v3 [cs.LG] 18 May 2025", + "bbox": [ + 22, + 255, + 60, + 707 + ], + "page_idx": 0 + }, + { + "type": "footer", + "text": "Preprint.", + "bbox": [ + 171, + 922, + 227, + 936 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "to generate pseudo test cases. Despite the promising results, the proposed method still necessitates supervision from instruction finetuning data and supervision signals from the frontier LLMs to initialize the RL process. Another more recent work [9] introduces a two-stage framework to construct self-rewarding reasoning models using self-generated data followed by RL. Despite the superior performance, the proposed method relies on a ground-truth verifier to obtain self-correction reasoning traces by rejection sampling. 
These approaches inspire our exploration of a critical open question: How can we incentivize LLM reasoning capacities in a fully unsupervised manner?", + "bbox": [ + 169, + 90, + 823, + 189 + ], + "page_idx": 1 + }, + { + "type": "image", + "img_path": "images/500ae90fc55e78382e716a769af6e3fc959b41fea3452ae5163b8e88a0534184.jpg", + "image_caption": [ + "(a) Comparison of different RL methods" + ], + "image_footnote": [], + "bbox": [ + 181, + 203, + 815, + 308 + ], + "page_idx": 1 + }, + { + "type": "image", + "img_path": "images/60bb097db42d16f1849c828d22d75124301e0a1142c86a96f7d7e2b0c7a98f8f.jpg", + "image_caption": [ + "(b) Overview of EMPO", + "Figure 2: Overview of the proposed method. (a) Previous method like PPO [10] or GRPO [11] typically relies on external supervised signals, e.g., a pretrained reward model or golden answers. (b) The proposed Entropy Minimized Policy Optimization (EMPO) samples a set of responses from the current policy model, and then builds semantic clusters according to their equivalence. By continuously minimizing the entropy at a meaning level, our method achieves competitive benchmark performance without any external supervision, i.e., rule-based reward, pre-defined test cases or an pre-trained reward model." + ], + "image_footnote": [], + "bbox": [ + 181, + 330, + 815, + 467 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Recent advanced DeepSeek-R1-Zero [12] demonstrates robust reasoning capabilities without dependency on SFT data. By directly initiating RL from the base model, DeepSeek-R1-Zero autonomously evolves sophisticated reasoning behaviors such as reflection and self-critic by exploring the reward signals provided by rule-based rewards. i.e., verified golden answers or an additional pre-trained reward model. Inspired by the success of DeepSeek-R1-Zero, our motivation is to devise a fully unsupervised approach for powerful reasoning capability. 
Specifically, we propose a novel reinforcement learning algorithm termed as Entropy Minimized Policy Optimization (EMP0), which incentivizes the reasoning capability of LLMs in a fully unsupervised manner by minimizing their predictive entropy in a latent semantic space. This method optimizes the model to favor reasoning traces yielding consistent answers, enhancing output reliability. The semantic entropy objective we propose to minimize is a well-established measurement of LLMs' uncertainty, which extends beyond mathematical reasoning to free-form question-answering tasks. We further introduce entropy thresholding to filter unreliable reasoning traces, stabilizing the unsupervised training process. Experiments on various tasks including mathematical reasoning and free-form natural reasoning are conducted to validate the proposed method. Our contributions are summarized as follows:", + "bbox": [ + 169, + 619, + 826, + 829 + ], + "page_idx": 1 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- We propose an effective and principled strategy called Entropy-Minimized Policy Optimization (EMPO) for incentivizing the reasoning capabilities of LLMs in a fully unsupervised manner.", + "- We establish semantic entropy as a potent intrinsic reward signal for guiding LLM reasoning. 
Our analysis confirms a strong negative correlation between semantic entropy and model" + ], + "bbox": [ + 215, + 838, + 823, + 912 + ], + "page_idx": 1 + }, + { + "type": "page_number", + "text": "2", + "bbox": [ + 493, + 935, + 503, + 946 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "accuracy, validating its efficacy as a robust, unsupervised optimization objective that drives models towards generating more consistent and reliable outputs.", + "bbox": [ + 228, + 90, + 823, + 119 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "- Experiments on both math reasoning tasks with deterministic golden answers and freeform natural reasoning tasks are conducted to validate the efficacy and versatility of EMP0. Additionally, we provide critical insights into EMP0's mechanism, demonstrating that its effectiveness stems from an enhanced ability to consistently select and prioritize strong, pre-existing reasoning pathways learned during pre-training, rather than instilling fundamentally new reasoning skills. This underscores EMP0's strength in efficiently eliciting and refining latent capabilities within base models.", + "bbox": [ + 215, + 128, + 826, + 227 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "2 Related Work", + "text_level": 1, + "bbox": [ + 171, + 253, + 323, + 268 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Self-Supervised and Semi-Supervised Reasoning. To address the dependency on labeled data, several self-supervised and unsupervised methods have emerged. Huang et al. [6] propose a self-improvement framework where LLMs generate high-confidence answers using Chain-of-Thought (CoT) prompting and self-consistency, subsequently fine-tuning on these pseudo-labels. However, the performance gains are often limited, and there is a risk of model collapse, as noted in [7]. Recently, Patel et al. [13] apply self-improvement to web navigation tasks in WebArena, fine-tuning on synthetic data generated by the model itself. 
Li et al. [14] enhance long-context reasoning via SeaLong, sampling multiple outputs and optimizing with Minimum Bayes Risk. These methods, while reducing reliance on external labels, still involve supervised fine-tuning steps, contrasting with EMPO's fully unsupervised RL approach. A concurrent work, i.e., test-time reinforcement learning (TTRL) [15] directly obtains pseudo label by major voting and then conducts RL on test prompts at inference time, whereas our EMPO strictly maintains the separation between training and testing phases for ensuring that the model remains unexposed to any test prompts during training. Furthermore, while TTRL is currently limited to mathematical tasks, our approach is applicable to more general free-form reasoning tasks.", + "bbox": [ + 169, + 287, + 826, + 494 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Self-Rewarding and RL-based Reasoning. RL has become a prominent technique for enhancing LLM reasoning, often leveraging external or self-generated rewards. Yuan et al. [16] propose using the LLM itself via LLM-as-a-Judge prompting to provide rewards during training, reducing reliance on human feedback. Similarly, Xiong et al. [9] propose a two-stage self-rewarding framework for mathematical reasoning, generating data and applying RL with a ground-truth verifier for self-correction, achieving superior performance but requiring supervised signals. Jiao et al. [8] frame solution labeling as evaluation against test cases, yet still rely on instruction fine-tuning and frontier LLM signals for RL initialization. Wen et al. [17] introduce Entropy-Regularized Token-Level Policy Optimization (ETPO), augmenting RL with an entropy bonus to promote exploration, differing from EMP0's entropy minimization focus. Guo et al. [12] with DeepSeek-R1 demonstrate robust reasoning via RL from a base model, using rule-based rewards. Xi et al. 
[18] present $\\mathbb{R}^3$ , a reverse curriculum RL approach using outcome supervision to mimic process supervision benefits. Wang et al. [19] propose CREAM, which enforces consistency regularization between internal reward models during self-training. These methods highlight a spectrum of supervision levels, positioning EMP0 as unique in its fully unsupervised nature, leveraging semantic entropy as an internal reward.", + "bbox": [ + 169, + 516, + 826, + 724 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Entropy Minimization and Semantic Consistency. Entropy minimization is a well-established technique in semi-supervised and unsupervised learning, with roots in traditional machine learning. Grandvalet and Bengio [20] demonstrate that minimizing entropy on unlabeled data can improve classification accuracy by encouraging model confidence. Test-time adaptation methods like Tent [21] adapt models to new domains by minimizing entropy on test data, filling domain gaps without additional labels. More recent work, COME, [22] extends this principle to conservative entropy minimization for robust adaptation. These approaches highlight the potential of entropy minimization as an unsupervised objective, which EMP0 leverages for LLM reasoning by extending it to semantic entropy [23] in a latent space. Farquhar et al. [24] further validate semantic entropy's utility in detecting hallucinations, reinforcing its relevance. Kharitonov et al. 
[25] explore entropy minimization in emergent languages, finding it naturally aligns with successful communication, providing additional theoretical foundation for EMP0.", + "bbox": [ + 169, + 744, + 826, + 910 + ], + "page_idx": 2 + }, + { + "type": "page_number", + "text": "3", + "bbox": [ + 493, + 935, + 504, + 946 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "3 Method", + "text_level": 1, + "bbox": [ + 171, + 89, + 272, + 104 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "We propose an RL-based method to minimize the entropy of LLM generations in a latent semantic space for incentivizing its reasoning capability. We term our method Entropy-Minimized Policy Optimization (EMPO), which is devised in a fully unsupervised manner without any forms of external supervised information.", + "bbox": [ + 169, + 119, + 823, + 176 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "3.1 Preliminaries", + "text_level": 1, + "bbox": [ + 171, + 191, + 307, + 205 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Recent advancements in reinforcement learning have demonstrated remarkable breakthroughs in enhancing the reasoning capabilities of LLMs. Taking the representative RL technique Group Relative Policy Optimization (GRPO) [11] used by DeepSeek-R1-Zero [12] as an example. GRPO first samples a group of outputs $\\{o_1, \\dots, o_G\\}$ from the policy model $\\pi_{\\theta}$ and then optimizes it by maximizing the following objective:", + "bbox": [ + 169, + 215, + 823, + 287 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\n\\begin{array}{l} \\mathcal {J} _ {\\mathrm {G R P O}} = \\mathbb {E} _ {[ q \\sim P (Q), \\{o _ {i} \\} _ {i = 1} \\sim \\pi_ {\\theta (O | q)} ]} \\\\ \\left[ \\frac {1}{G} \\sum_ {i = 1} ^ {G} \\left(\\min \\left(A _ {i}, \\operatorname {c l i p} (1, 1 - \\epsilon , 1 + \\epsilon) A _ {i}\\right) - \\beta K L \\left(\\pi_ {\\theta} \\mid \\mid \\pi_ {r e f}\\right) \\right. 
\\right], \\tag {1} \\\\ \\end{array}\n$$\n", + "text_format": "latex", + "bbox": [ + 259, + 292, + 825, + 354 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "where $\\beta$ is a hyper-parameter which avoids the policy model to diverge too far away from the reference model $\\pi_{ref}$ . $\\epsilon$ clips extreme advantages for stability. $G$ is the number of samples in one group. $A_{i}$ is the advantage computed by normalizing the rewards within each group as $A_{i} = \\frac{r_{i} - mean(\\{r_{1},\\cdots,r_{G}\\})}{std(r_{1},\\cdots,r_{G})}$ . In math reasoning task, the reward can be computed by predefined rules:", + "bbox": [ + 169, + 358, + 826, + 421 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\nr _ {i} = \\left\\{ \\begin{array}{l l} 1 & \\text {i f v e r i f i e r} (o _ {i}, a) = \\text {T r u e} \\\\ 0 & \\text {o t h e r w i s e} \\end{array} , \\right. \\tag {2}\n$$\n", + "text_format": "latex", + "bbox": [ + 374, + 426, + 825, + 460 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "where a verifier is used to determine the correctness of $o_i$ by comparing it with the golden answer $a$ .", + "bbox": [ + 169, + 465, + 826, + 481 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Unlike the above example, we consider fully unsupervised optimization settings where there are no golden answers to verify the correctness of model predictions. In this circumstance, we only have unlabeled reasoning problems $P(Q)$ . Such problems were freely raised by users during the deployment of LLMs. 
Given a pre-training LLM $\\pi_{\\theta}$ parameterized by $\\theta$ , our goal is to enhance its reasoning ability by only utilizing the unlabeled user problems $\\{q_i\\}_{i=1}^n$ , which requests minimized cost of data collection.", + "bbox": [ + 169, + 486, + 823, + 569 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "3.2 Semantic Entropy Minimization Objective", + "text_level": 1, + "bbox": [ + 171, + 585, + 508, + 599 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Entropy is a classical unsupervised objective in the traditional semi-supervised and unsupervised learning fields [20, 26]. Previous works in computer vision show that by continuously minimizing the entropy on unlabeled samples after pre-training, the classification accuracy of machine learning models can be significantly improved to fill the domain gaps [21, 22]. The basic intuition behind entropy minimization is that a robust model should not only fit labeled data well but also make confident and consistent predictions on unlabeled data. This principle encourages the model to avoid ambiguity and make decisive predictions, thereby enhances generalization. In this work, we choose semantic entropy [23] as our unsupervised optimization objective, which is a natural extension of classical Shannon entropy specified for large language models. Intuitively speaking, minimizing semantic entropy encourages the LLMs' outputs to be more consistent in semantic level rather than format, and thus the final answers are expected to be more reliable.", + "bbox": [ + 169, + 609, + 825, + 763 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Specifically, semantic entropy first samples a group of outputs $\\{o_1,\\dots ,o_G\\}$ and then clusters the output sequences according to their meaning. That is, if two outputs share the same meaning (i.e., they are bidirectionally entailed), they should be merged into one same cluster in the semantic space. 
This can be done without notable computational cost by predefined rules such as N-gram, regular expressions or an additional small language model. Once built such a set of meaning clusters $\\{c\\}$ in semantic space, we then approximate the probability over the meanings as the proportion of sampled answers as", + "bbox": [ + 169, + 768, + 825, + 864 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\np \\left(c _ {j} \\mid x\\right) \\approx \\left| c _ {j} \\right| / G, \\tag {3}\n$$\n", + "text_format": "latex", + "bbox": [ + 434, + 864, + 823, + 882 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "where $c_{j} \\in \\{c\\}$ is the $j$ -th meaning cluster. $|c_{j}|$ denotes the numbers of outputs that belong to $c_{j}$ . Finally, given question $q$ , the semantic entropy (denoted as $H$ ) over the model's output meanings", + "bbox": [ + 169, + 883, + 826, + 912 + ], + "page_idx": 3 + }, + { + "type": "page_number", + "text": "4", + "bbox": [ + 493, + 935, + 504, + 946 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "distribution can be estimated as follows", + "bbox": [ + 171, + 90, + 436, + 104 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\nH = - \\sum_ {c _ {j} \\in \\{c \\}} p (c _ {j} | q) \\log p (c _ {j} | q). \\tag {4}\n$$\n", + "text_format": "latex", + "bbox": [ + 385, + 107, + 825, + 141 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "As proven by previous work, semantic entropy has a strong negative relationship with model accuracy, which can be used as an efficient measurement to detect unreliable LLM generations such as confabulation and hallucination [23, 24]. 
Motivated by this, we propose to leverage semantic entropy as an unsupervised optimization objective for incentivizing the reasoning capability of LLM.", + "bbox": [ + 169, + 145, + 826, + 202 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "3.3 Entropy-Minimized Policy Optimization", + "text_level": 1, + "bbox": [ + 171, + 215, + 495, + 231 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "We propose Entropy-Minimized Policy Optimization (EMPO), an RL-based method that optimizes the pre-trained large language model $\\pi_{\\theta}$ to favor low semantic entropy responses given unlabeled user questions $\\{q_i\\}_{i=1}^n$ . Given input questions, EMPO incentivizes the outputs that belong to higher probability meaning cluster, and thus minimizes the semantic entropy over the meaning distribution. Specifically, given a question $q$ , our EMPO first samples a group of output $\\{o_1, \\ldots, o_G\\}$ from the current model $\\pi_{\\theta}$ and then merges them into a set of $M$ meaning clusters $\\{c_1, \\ldots, c_M\\}$ . As we mentioned before, this can be done without notable computational cost (please refer to the quantitative results in Appendix F) by predefined rules such as N-gram, regular expressions or an additional small language model (SLM) $^1$ . 
Once such a meaning set is built, EMPO approximately minimizes the semantic entropy $H$ by maximizing the following objective",
For instance, models could exploit the reward signal by overfitting to highly confident but wrong predictions for the most frequent semantic clusters without a careful reasoning process. To address this, we implement a straightforward entropy thresholding strategy, restricting optimization to prompts exhibiting moderate uncertainty via dual threshold criteria. Specifically, two entropy thresholds are deployed to filter out user queries $q$ that result in overly high or low entropy unreliable answers. Extremely high entropy indicates that the model is highly uncertain, and thus its predictions are prone to be unreliable. In addition, continuously optimizing on responses with already low entropy is redundant and at the risk of overconfidence [27]. The final optimization objective of EMPO is",
Besides, we also filter out low-entropy answers to maintain the diversity of model outputs and further avoid potential reward hacking. Following previous work [28], we remove the KL constraint for better performance. $\\epsilon$ clips extremely high or low advantages for stability similar to common practice.", + "bbox": [ + 169, + 781, + 826, + 852 + ], + "page_idx": 4 + }, + { + "type": "page_footnote", + "text": "1Such a SLM does not provide explicit or direct supervision signals regarding the correctness or quality of reasoning for a given query. The \"unsupervised\" nature of EMP0 refers to its independence from labeled (query, correct-answer) pairs or (query, valid-reasoning-trajectory) pairs for learning the reasoning task itself. More discussions are in Appendix I.", + "bbox": [ + 169, + 859, + 826, + 912 + ], + "page_idx": 4 + }, + { + "type": "page_number", + "text": "5", + "bbox": [ + 493, + 935, + 504, + 946 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "4 Experiments", + "text_level": 1, + "bbox": [ + 171, + 89, + 313, + 107 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "4.1 Experimental Settings", + "text_level": 1, + "bbox": [ + 171, + 121, + 369, + 137 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "We conduct experiments on multiple datasets including both closed-form math reasoning tasks and free-form natural reasoning tasks. Our EMP0 shows competitive performance by purely RL in a fully unsupervised manner compared to supervised finetuning and RL methods.", + "bbox": [ + 169, + 148, + 823, + 191 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Prompt Collection and Data Engineering. For mathematical reasoning, following the common practice [29, 8, 30], we adopt 20,000 prompts randomly selected from NuminaMath-CoT dataset [31] for training $^{2}$ without additional data engineering. 
For free-form natural reasoning tasks, we adopt the prompts from Natural Reasoning $^3$ , a large-scale dataset consisting of diverse reasoning questions from multiple domains (e.g., Physics, Computer Science, Economics, Social Sciences and more). For training efficiency, we filter out questions with over-long prompts or reference answers. Besides, taking inspiration from [32, 33, 34], we use the response length of Llama3.3-70B-Instruct as a difficulty estimation metric, and filter out samples with response lengths exceeding 4096 tokens. The remaining samples are simpler for stabilizing the training process. The final training subset consists of 18,000 questions. More details can be found in Appendix G.",
We also compared with Qwen2.5-Math Instruction models for a more comprehensive comparison, where the instruction model is trained by iteratively supervised finetuning and RL on private data. For free-form natural reasoning tasks, we initialize from Qwen2.5-3B, 7B and 14B Base models. Different from mathematical reasoning, it is difficult to adopt rule-based reward for free-form question-answering tasks without deterministic golden answers. We consider the corresponding Instruct model, the Base model with or without few-shot CoT prompt as baselines. Besides, we also compare with SFT where the Base model is tuned to fit the response of Llama3.3-70B-Instruct. For more results on other model families beyond the Qwen series (e.g., Llama3), please refer to the Appendix D.", + "bbox": [ + 169, + 458, + 826, + 612 + ], + "page_idx": 5 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- SFT: We train models by supervised finetuning via Open-Instruct [38] with a fixed learning rate of $1 \\times 10^{-6}$ , a global batch size of 128 and train for 1 epoch with a max length of 2048.", + "- GRPO: We implement GRPO viaTRL [39] based on Open-R1 [29]. We sample 7 and 12 responses for each prompt for mathematical and natural reasoning tasks respectively. We train the model for 3 epochs with a maximum generation length of 2048. Following [40], we only use the rule-based accuracy reward and do not adopt format-reward. The accuracy reward is implemented as follows: If the response contains the correct final answer within \"boxed{}\", it receives a reward of 1. If the model prediction is wrong, it receives a reward of 0. When there is no answer can be extracted from the model's response, the reward is $-0.5$ .", + "- Online-DPO: Recent advanced Online-DPO first samples a set of responses and then verifies and selects the responses with highest reward and lowest reward as a preference pair. 
We directly copy the results from [30], where the model is trained for 7 iterations. Each iteration involves 2 training epochs and 20K training samples, i.e., 140K training samples in total.", + "- EMP0: Most hyper-parameters of our method, e.g., number of generations, max generation length, batch size, learning rate are the same with GRPO. In mathematical reasoning tasks, we use a set of regular expressions to merge the outputs into meaning clusters. For" + ], + "bbox": [ + 215, + 623, + 823, + 867 + ], + "page_idx": 5 + }, + { + "type": "page_footnote", + "text": "$^{2}$ https://huggingface.co/datasets/RLHFlow/numa_prompt_dpo1", + "bbox": [ + 189, + 883, + 571, + 897 + ], + "page_idx": 5 + }, + { + "type": "page_footnote", + "text": "3https://huggingface.co/datasets/facebook/natural_reasoning", + "bbox": [ + 194, + 898, + 550, + 910 + ], + "page_idx": 5 + }, + { + "type": "page_number", + "text": "6", + "bbox": [ + 493, + 936, + 504, + 946 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "more general free-form natural reasoning, we leverage General-Verifier $^4$ (a compact small language model with 1.5B parameters) to determine whether two outputs are of the same meaning or not following [23, 24]. A concrete example can be found in Appendix C. Specifically, if the final predictions (i.e., the contents within \"\\boxed{}\") of two model outputs are bidirectionally implicating, then we merge them into one semantic cluster ignoring their reasoning traces. More details are in Appendix E.", + "bbox": [ + 228, + 90, + 826, + 175 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "4.2 Main Results", + "text_level": 1, + "bbox": [ + 171, + 191, + 305, + 205 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "4.2.1 Performance on Mathematical Reasoning Tasks.", + "text_level": 1, + "bbox": [ + 171, + 215, + 562, + 233 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "We conduct experiments on mathematical tasks to evaluate our method. 
The main results are shown in Table 1. EMPO has successfully incentivized the Qwen2.5-Math Base model with reasoning capability without dependency on any external supervision. We observe a substantial improvement in the average performance on commonly used mathematical reasoning benchmarks from $28.1\%$ to $42.1\%$ and $30.7\%$ to $48.1\%$ on 1.5B and 7B models, respectively. Notably, through fully unsupervised RL training, the 1.5B and 7B models have both achieved competitive performance (42.1% and $48.1\%$ ) near to Qwen2.5-Math-Instruct (40.5% and $49.4\%$ ), where the latter depends on private dataset and multi-stage iteratively supervised fine-tuning and reinforcement learning.",
SupervisionMATHMinerva MathOlympiad BenchAIME24AMC23Avg.
frontier model
Llama-3.1-70B-Instruct{q,r,a}64.635.331.916.730.135.7
Eurus-2-7B-PRIME{q,r,a}79.238.642.126.757.848.9
1.5B model
Qwen2.5-MathNone52.210.725.210.042.528.1
Qwen2.5-Math-Instruct{q,r,a}73.830.938.76.752.540.5
Qwen2.5-Math w/SFT{q,r,a}61.826.127.13.337.531.2
Qwen2.5-Math w/GRPO{q,a}75.232.033.616.752.542.0
Qwen2.5-Math w/EMPO{q}73.032.436.613.355.042.1
7B model
Qwen2.5-MathNone64.815.126.76.740.030.7
Qwen2.5-Math Instruct{q,r,a}82.843.841.216.762.549.4
Qwen2.5-Math w/SFT{q,r,a}72.234.633.210.045.039.0
Qwen2.5-Math w/ODPO{q,a}76.830.937.926.762.547.0
Qwen2.5-Math w/GRPO{q,a}77.839.739.120.057.546.8
Qwen2.5-Math w/EMPO{q}78.040.437.320.065.048.1
", + "bbox": [ + 171, + 412, + 828, + 648 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "4.2.2 Performance on Natural Free-form Reasoning Tasks.", + "text_level": 1, + "bbox": [ + 171, + 670, + 591, + 686 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "We present the results on free-form natural reasoning tasks in Table 2. On the MMLU-Pro benchmark, our EMP0 improves the accuracy from $32.1\\%$ to $50.1\\%$ and $32.7\\%$ to $58.8\\%$ on Qwen2.5-7B and 14B Base model respectively. Besides, on more challenging GPQA benchmark, EMP0 results in increasing accuracy from $15.9\\%$ to $28.8\\%$ on 7B model, $30.6\\%$ to $35.3\\%$ on 14B model. Notably, we observe that the SFT baseline fails to consistently improve model performance. We hypothesize that this is due to the noise in the reference responses within the Natural Reasoning training data (as mentioned by [32]). This phenomenon further underscores the practical potential of our proposed method.", + "bbox": [ + 169, + 694, + 826, + 792 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "4.2.3 Training Dynamics", + "text_level": 1, + "bbox": [ + 171, + 806, + 359, + 821 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "We further conduct experiments to investigate the reliability of our unsupervised reward signals. As shown in Figure 3, the unsupervised reward signals of EMP0 have a strongly negative correlation with the true rewards based on golden answers. 
Thus, by continuously minimizing the semantic entropy objective, the model can boost its accuracy in a fully unsupervised manner.", + "bbox": [ + 169, + 830, + 823, + 887 + ], + "page_idx": 6 + }, + { + "type": "page_footnote", + "text": "4https://huggingface.co/TIGER-Lab/general-verifier", + "bbox": [ + 189, + 896, + 501, + 911 + ], + "page_idx": 6 + }, + { + "type": "page_number", + "text": "7", + "bbox": [ + 493, + 935, + 504, + 946 + ], + "page_idx": 6 + }, + { + "type": "table", + "img_path": "images/4ef9f197d8e943cb755d6ac18d94d533d69a071ce7d5d85c4d3749ba340c60ae.jpg", + "table_caption": [ + "Table 2: Accuracy results on free-form natural reasoning benchmarks. We report pass@1 accuracy tested with greedy decoding. Here $\\{q,r,a\\}$ denote the dependency on questions, human-verified reasoning traces and verifiable golden answers respectively." + ], + "table_footnote": [], + "table_body": "
SupervisionMMLU ProGPQA
STEMHumanitiesSocialOtherAvg.
3B model
Qwen2.5-Base-8.325.357.424.156.8311.2
Qwen2.5-Base 5-shot{q,r,a}34.726.247.935.935.313.8
Qwen2.5-Instruct{q,r,a}44.830.756.047.144.528.2
Qwen2.5-Base w/SFT{q,r,a}19.810.428.018.419.111.5
Qwen2.5-Base w/GRPO{q,a}32.227.749.838.735.217.1
Qwen2.5-Base w/EMPO{q}31.726.248.136.734.120.6
7B model
Qwen2.5-Base-30.123.845.934.332.115.9
Qwen2.5-Base 5-shot{q,r,a}45.736.359.149.446.823.5
Qwen2.5-Instruct{q,r,a}56.938.164.158.655.235.3
Qwen2.5-Base w/SFT{q,r,a}32.67.115.830.125.622.4
Qwen2.5-Base w/GRPO{q,a}57.136.264.456.654.533.8
Qwen2.5-Base w/EMPO{q}52.434.659.050.950.128.8
14B model
Qwen2.5-Base-30.828.044.433.032.730.6
Qwen2.5-Base 5-shot{q,r,a}51.935.863.454.451.433.2
Qwen2.5-Instruct{q,r,a}63.647.173.866.762.942.9
Qwen2.5-Base w/SFT{q,r,a}37.027.840.238.036.128.5
Qwen2.5-Base w/GRPO{q,a}62.942.168.659.859.635.6
Qwen2.5-Base w/EMPO{q}61.441.668.360.058.835.3
", + "bbox": [ + 187, + 136, + 810, + 435 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/54ea7ba11ac679ed9b70c45139b15bb0c494d20f9cfe7b9ba7e519498e1d1d00.jpg", + "image_caption": [ + "Figure 3: We visualize the training dynamics when tuning Qwen2.5-Math-7B Base model with EMP0 on 20K prompts randomly sampled from NuminaMath-CoT. The left illustrates the running average of semantic entropy (Eq. 4). The middle shows the trend of our unsupervised reward as defined by Eq. 6. The right shows the model accuracy on training data at each RL steps. Along the unsupervised RL-based training trajectory, EMP0 establishes a stable learning process with consistently decreased semantic entropy and improved accuracy." + ], + "image_footnote": [], + "bbox": [ + 176, + 448, + 383, + 583 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/075188f39a5ebdacf50005cb09d7e38cd4328467bddde5289c5e00402e56d320.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 385, + 448, + 599, + 583 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/f93c2ee71c0adf195cf26c5902acef4260f08f79adba738f3dcae5c452ddfc31.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 602, + 448, + 816, + 583 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "5 Discussion and Conclusion: The Role of Unsupervised Learning in Eliciting Pre-Trained Reasoning Capabilities", + "text_level": 1, + "bbox": [ + 169, + 702, + 823, + 739 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "The strong empirical performance of EMP0, particularly its ability as a fully unsupervised method to match or even slightly outperform supervised counterparts like GRPO (as observed with the 7B model), prompts a deeper examination of how such reasoning incentivization mechanisms work. 
This is especially pertinent given the counterintuitive observation that these substantial improvements on benchmarks are achieved without a consistent increase in response length or clear evidence of an \"Aha moment\" – a hypothesized sudden emergence of enhanced reasoning capabilities.", + "bbox": [ + 169, + 752, + 823, + 835 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "To dissect the nature of the improvements conferred by reinforcement learning (RL) post-training, we investigated its influence on pass@k accuracy. This metric is crucial as recent studies [41, 42] suggest that RL may not fundamentally expand the inherent reasoning capacities of LLMs beyond those already embedded in their pre-trained base. As depicted in Figure 4, our findings align with this perspective. Both GRPO and EMP0 significantly enhance pass@k scores for small to moderate", + "bbox": [ + 169, + 840, + 826, + 912 + ], + "page_idx": 7 + }, + { + "type": "page_number", + "text": "8", + "bbox": [ + 493, + 935, + 503, + 946 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/335efb518b469c6cc68204d57510d455982bcad4c3c5a141a0b1137dc465080b.jpg", + "image_caption": [ + "Figure 4: Pass@k curves of Qwen2.5-Math-7B Base model and its counterparts trained with GRPO and our EMP0 on Minerva Math and OMNI reasoning benchmarks. Pass@k measures the probability that at least 1 of the top $k$ generated solutions is correct. Pass@1 is equivalent to accuracy, as it checks if the single solution is correct. When $k$ is small, RL-trained models outperform the original base model. However, as $k$ increases (e.g., into the tens or hundreds), the performance of the base models often converges with, or even exceeds, that of the RL-trained models." 
+ ], + "image_footnote": [], + "bbox": [ + 178, + 89, + 496, + 262 + ], + "page_idx": 8 + }, + { + "type": "image", + "img_path": "images/9954ebe5c2ce338a3ab700264b280f56e5763e1ff349dfed0fa4106fd524e1d7.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 496, + 89, + 808, + 261 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "values of k (e.g., $k = 16$ or 32) compared to the base model. This demonstrates an improved efficiency in surfacing correct reasoning paths with fewer attempts. However, as k becomes substantially large, the performance of these RL-trained models tends to converge with, and is sometimes surpassed by, that of the base model.", + "bbox": [ + 169, + 401, + 823, + 457 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "This convergence at high $k$ values, coupled with our qualitative observations that the base models themselves already exhibit sophisticated reasoning behaviors such as pausing, self-correction, and backtracking (see Appendix for examples), strongly indicates that the foundational reasoning pathways are largely pre-existing. Consequently, RL post-training, whether supervised or unsupervised like EMP0, appears to primarily refine the model's ability to efficiently access, prioritize, and consistently select these latent reasoning patterns, rather than instilling fundamentally novel ones. The observed improvements in pass@1 (accuracy) are thus likely a consequence of this enhanced sampling efficiency.", + "bbox": [ + 169, + 462, + 826, + 574 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "These empirical insights from the pass@k analysis lend considerable support to the emerging consensus that pre-training shoulders the primary burden of endowing LLMs with their core abilities. We align our interpretation with prior insights from [43]: \"Pretraining does all the hard work. 
One big bet is that the pretraining phase grants all the abilities to the base LM, and finetuning is simply like a style transfer which positions the model to the right output space.\" Under this conjecture (or more precisely, an emerging, but not yet unanimously accepted consensus [41]), we attribute the efficacy of our method to the robust pretraining process of the Qwen2.5 Base model: If a base model possesses strong inherent reasoning capabilities, the subsequent challenge is not necessarily to teach it new reasoning skills from scratch, but rather to effectively elicit and guide these existing skills.", + "bbox": [ + 169, + 580, + 823, + 705 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "EMPO's success highlights that intrinsic reward signals, derived purely from the model's objective to minimize semantic entropy and thus achieve greater consistency in its outputs, can be surprisingly potent for this elicitation process. In a well-pre-trained model, outputs that are semantically consistent are more likely to align with correct and coherent reasoning. EMPO leverages this by incentivizing the model to favor such consistent outputs, effectively guiding it to refine its selection from its collection of existing reasoning strategies without requiring external validation of correctness.", + "bbox": [ + 169, + 710, + 823, + 794 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "In conclusion, while RL techniques, including EMP0, may not be forging entirely new fundamental reasoning capabilities beyond what pre-training provides, their role in significantly enhancing the sampling efficiency and reliability of accessing these pre-trained abilities is of paramount practical importance. Optimizing models for such efficiency is crucial for real-world applications. 
EMPO, by achieving this through a fully unsupervised framework, offers a particularly scalable, cost-effective, and practical approach to unlocking and refining the vast reasoning potential embedded within pre-trained LLMs, especially in domains where curated supervisory data is scarce or prohibitively expensive to obtain.",
arXiv preprint arXiv:2210.11610, 2022.", + "[7] Ilia Shumailov, Zakhar Shumaylov, Yiren Zhao, Nicolas Papernot, Ross Anderson, and Yarin Gal. Ai models collapse when trained on recursively generated data. Nature, 631(8022):755-759, 2024.", + "[8] Fangkai Jiao, Geyang Guo, Xingxing Zhang, Nancy F Chen, Shafiq Joty, and Furu Wei. Preference optimization for reasoning with pseudo feedback. arXiv preprint arXiv:2411.16345, 2024.", + "[9] Wei Xiong, Hanning Zhang, Chenlu Ye, Lichang Chen, Nan Jiang, and Tong Zhang. Selfrewarding correction for mathematical reasoning. arXiv preprint arXiv:2502.19613, 2025.", + "[10] John Schulman, Filip Wolski, Prafulla Dhariwal, Alec Radford, and Oleg Klimov. Proximal policy optimization algorithms. arXiv preprint arXiv:1707.06347, 2017.", + "[11] Zhihong Shao, Peiyi Wang, Qihao Zhu, Runxin Xu, Junxiao Song, Xiao Bi, Haowei Zhang, Mingchuan Zhang, YK Li, Y Wu, et al. Deepseekmath: Pushing the limits of mathematical reasoning in open language models. arXiv preprint arXiv:2402.03300, 2024.", + "[12] Daya Guo, Dejian Yang, Haowei Zhang, Junxiao Song, Ruoyu Zhang, Runxin Xu, Qihao Zhu, Shirong Ma, Peiyi Wang, Xiao Bi, et al. Deepseek-r1: Incentivizing reasoning capability in llms via reinforcement learning. arXiv preprint arXiv:2501.12948, 2025.", + "[13] Ajay Patel, Markus Hofmarcher, Claudiu Leoveanu-Condrei, Marius-Constantin Dinu, Chris Callison-Burch, and Sepp Hochreiter. Large language models can self-improve at web agent tasks. arXiv preprint arXiv:2405.20309, 2024.", + "[14] Siheng Li, Cheng Yang, Zesen Cheng, Lemao Liu, Mo Yu, Yujiu Yang, and Wai Lam. Large language models can self-improve in long-context reasoning. arXiv preprint arXiv:2411.08147, 2024.", + "[15] Yuxin Zuo, Kaiyan Zhang, Shang Qu, Li Sheng, Xuekai Zhu, Biqing Qi, Youbang Sun, Ganqu Cui, Ning Ding, and Bowen Zhou. Trl: Test-time reinforcement learning. 
arXiv preprint arXiv:2504.16084, 2025.", + "[16] Weizhe Yuan, Richard Yuanzhe Pang, Kyunghyun Cho, Xian Li, Sainbayar Sukhbaatar, Jing Xu, and Jason Weston. Self-rewarding language models, 2024.", + "[17] Muning Wen, Cheng Deng, Jun Wang, Weinan Zhang, and Ying Wen. Entropy-regularized token-level policy optimization for large language models. arXiv e-prints, pages arXiv-2402, 2024.", + "[18] Zhiheng Xi, Wenxiang Chen, Boyang Hong, Senjie Jin, Rui Zheng, Wei He, Yiwen Ding, Shichun Liu, Xin Guo, Junzhe Wang, et al. Training large language models for reasoning through reverse curriculum reinforcement learning. arXiv preprint arXiv:2402.05808, 2024.", + "[19] Zhaoyang Wang, Weilei He, Zhiyuan Liang, Xuchao Zhang, Chetan Bansal, Ying Wei, Weitong Zhang, and Huaxiu Yao. CREAM: Consistency regularized self-rewarding language models. In The Thirteenth International Conference on Learning Representations, 2025." + ], + "bbox": [ + 173, + 114, + 826, + 912 + ], + "page_idx": 9 + }, + { + "type": "page_number", + "text": "10", + "bbox": [ + 490, + 935, + 508, + 946 + ], + "page_idx": 9 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[20] Yves Grandvalet and Yoshua Bengio. Semi-supervised learning by entropy minimization. Advances in neural information processing systems, 17, 2004.", + "[21] Dequan Wang, Evan Shelhamer, Shaoteng Liu, Bruno Olshausen, and Trevor Darrell. Tent: Fully test-time adaptation by entropy minimization. arXiv preprint arXiv:2006.10726, 2020.", + "[22] Qingyang Zhang, Yatao Bian, Xinke Kong, Peilin Zhao, and Changqing Zhang. Come: Test-time adaption by conservatively minimizing entropy. arXiv preprint arXiv:2410.10894, 2024.", + "[23] Lorenz Kuhn, Yarin Gal, and Sebastian Farquhar. Semantic uncertainty: Linguistic invariances for uncertainty estimation in natural language generation. arXiv preprint arXiv:2302.09664, 2023.", + "[24] Sebastian Farquhar, Jannik Kossen, Lorenz Kuhn, and Yarin Gal. 
Detecting hallucinations in large language models using semantic entropy. Nature, 630(8017):625-630, 2024.", + "[25] Eugene Kharitonov, Rahma Chaabouni, Diane Bouchacourt, and Marco Baroni. Entropy minimization in emergent languages. In International Conference on Machine Learning, pages 5220-5230. PMLR, 2020.", + "[26] Ori Press, Ravid Shwartz-Ziv, Yann LeCun, and Matthias Bethge. The entropy enigma: Success and failure of entropy minimization. arXiv preprint arXiv:2405.05012, 2024.", + "[27] Soren Mindermann, Jan M Brauner, Muhammed T Razzak, Mrinank Sharma, Andreas Kirsch, Winnie Xu, Benedikt Holgen, Aidan N Gomez, Adrien Morisot, Sebastian Farquhar, et al. Prioritized training on points that are learnable, worth learning, and not yet learnt. In International Conference on Machine Learning, pages 15630-15649. PMLR, 2022.", + "[28] Qiying Yu, Zheng Zhang, Ruofei Zhu, Yufeng Yuan, Xiaochen Zuo, Yu Yue, Tiantian Fan, Gaohong Liu, Lingjun Liu, Xin Liu, et al. Dapo: An open-source llm reinforcement learning system at scale. arXiv preprint arXiv:2503.14476, 2025.", + "[29] Hugging Face. Open r1: A fully open reproduction of deepseek-r1, January 2025.", + "[30] Hanning Zhang, Jiarui Yao, Chenlu Ye, Wei Xiong, and Tong Zhang. Online-dpo-r1: Unlocking effective reasoning without the ppo overhead, 2025. Notion Blog.", + "[31] Jia LI, Edward Beeching, Lewis Tunstall, Ben Lipkin, Roman Soletskyi, Shengyi Costa Huang, Kashif Rasul, Longhui Yu, Albert Jiang, Ziju Shen, Zihan Qin, Bin Dong, Li Zhou, Yann Fleureau, Guillaume Lample, and Stanislas Polu. Numinamath. [https://huggingface.co/AI-MO/NuminaMath-CoT](https://github.com/project-numina/aimo-progress-prize/blob/main/report/numina_dataset.pdf), 2024.", + "[32] Weizhe Yuan, Jane Yu, Song Jiang, Karthik Padthe, Yang Li, Dong Wang, Ilia Kulikov, Kyunghyun Cho, Yuandong Tian, Jason E Weston, et al. Naturalreasoning: Reasoning in the wild with $2.8\\mathrm{m}$ challenging questions. 
arXiv preprint arXiv:2502.13124, 2025.", + "[33] Xingyu Chen, Jiahao Xu, Tian Liang, Zhiwei He, Jianhui Pang, Dian Yu, Linfeng Song, Qiuzhi Liu, Mengfei Zhou, Zhuosheng Zhang, et al. Do not think that much for $2 + 3 = ?$ on the overthinking of o1-like llms. arXiv preprint arXiv:2412.21187, 2024.", + "[34] Huan Ma, Jingdong Chen, Guangyu Wang, and Changqing Zhang. Estimating llm uncertainty with logits. arXiv preprint arXiv:2502.00290, 2025.", + "[35] Weihao Zeng, Yuzhen Huang, Wei Liu, Keqing He, Qian Liu, Zejun Ma, and Junxian He. 7b model and 8k examples: Emerging reasoning with reinforcement learning is both effective and efficient. https://hkust-nlp.notion.site/simplerl-reason, 2025. Notion Blog.", + "[36] Yubo Wang, Xueguang Ma, Ge Zhang, Yuansheng Ni, Abhranil Chandra, Shiguang Guo, Weiming Ren, Aaran Arulraj, Xuan He, Ziyan Jiang, et al. Mmlu-pro: A more robust and challenging multi-task language understanding benchmark. In The Thirty-eight Conference on Neural Information Processing Systems Datasets and Benchmarks Track, 2024.", + "[37] David Rein, Betty Li Hou, Asa Cooper Stickland, Jackson Petty, Richard Yuanzhe Pang, Julien Dirani, Julian Michael, and Samuel R Bowman. Gpqa: A graduate-level google-proof q&a benchmark. In First Conference on Language Modeling, 2024.", + "[38] Nathan Lambert, Jacob Morrison, Valentina Pyatkin, Shengyi Huang, Hamish Ivison, Faeze Brahman, Lester James V. Miranda, Alisa Liu, Nouha Dziri, Shane Lyu, Yuling Gu, Saumya Malik, Victoria Graf, Jena D. Hwang, Jiangjiang Yang, Ronan Le Bras, Oyvind Tafjord, Chris" + ], + "bbox": [ + 173, + 90, + 826, + 912 + ], + "page_idx": 10 + }, + { + "type": "page_number", + "text": "11", + "bbox": [ + 490, + 935, + 506, + 946 + ], + "page_idx": 10 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "Wilhelm, Luca Soldaini, Noah A. Smith, Yizhong Wang, Pradeep Dasigi, and Hannaneh Hajishirzi. Tulu 3: Pushing frontiers in open language model post-training. 
2024.", + "[39] Leandro von Werra, Younes Belkada, Lewis Tunstall, Edward Beeching, Tristan Thrush, Nathan Lambert, Shengyi Huang, Kashif Rasul, and Quentin Gallouédec. Trl: Transformer reinforcement learning. https://github.com/huggingface/trl, 2020.", + "[40] Jingcheng Hu, Yinmin Zhang, Qi Han, Daxin Jiang, and Heung-Yeung Shum Xiangyu Zhang. Open-reasoner-zero: An open source approach to scaling reinforcement learning on the base model. https://github.com/Open-Reasoner-Zero/Open-Reasoner-Zero, 2025.", + "[41] Yang Yue, Zhiqi Chen, Rui Lu, Andrew Zhao, Zhaokai Wang, Shiji Song, and Gao Huang. Does reinforcement learning really incentivize reasoning capacity in llms beyond the base model? arXiv preprint arXiv:2504.13837, 2025.", + "[42] Yuda Song, Hanlin Zhang, Carson Eisenach, Sham Kakade, Dean Foster, and Udaya Ghai. Mind the gap: Examining the self-improvement capabilities of large language models. arXiv preprint arXiv:2412.02674, 2024.", + "[43] Zhengxuan Wu, Aryaman Arora, Zheng Wang, Atticus Geiger, Dan Jurafsky, Christopher D Manning, and Christopher Potts. Reft: Representation finetuning for language models. Advances in Neural Information Processing Systems, 37:63908-63962, 2024.", + "[44] George Casella and Roger Berger. Statistical inference. CRC press, 2024." 
+ ], + "bbox": [ + 173, + 90, + 826, + 371 + ], + "page_idx": 11 + }, + { + "type": "page_number", + "text": "12", + "bbox": [ + 490, + 935, + 508, + 946 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "Appendices", + "text_level": 1, + "bbox": [ + 171, + 89, + 274, + 108 + ], + "page_idx": 12 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "A Prompt Templates 13", + "B Case Study 15", + "C Implementation Details about Semantic Clustering 16", + "D Additional Results on Llama3 Model Series 16", + "E Additional Training Details 18", + "F Computational Cost of Semantic Clustering 18", + "G Details of Prompt Collection 19", + "H Additional Result about Pass@k 19", + "I The Influence of Clustering Quality on the Performance of EMPO 19" + ], + "bbox": [ + 173, + 125, + 825, + 407 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "A Prompt Templates", + "text_level": 1, + "bbox": [ + 171, + 426, + 364, + 445 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "We provide the prompt templates used for training and evaluation.", + "bbox": [ + 171, + 458, + 606, + 474 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "For mathematical reasoning tasks, we adopt the following reasoning prompt template similar to Online-DPO-R1 project [30] for both training and testing. During testing, we found that by adding system prompt, the accuracy of Qwen2.5-Math Base model can be better on mathematical benchmarks. However, system prompt would not help in natural reasoning tasks. Thus we use the same test prompt (start with system prompt) for both Base model and finetuned models in mathematical tasks. 
In natural reasoning tasks, we do not add system prompt for Base models.", + "bbox": [ + 169, + 479, + 826, + 564 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "Mathematical Reasoning Training and Evaluation Template", + "text_level": 1, + "bbox": [ + 187, + 573, + 609, + 588 + ], + "page_idx": 12 + }, + { + "type": "code", + "sub_type": "code", + "code_caption": [], + "code_body": "system \nPlease reason step by step, and output your final answer within \\boxed{}}. \n \nuser \n{Question} Let's think step by step and output the final answer within \\boxed{}}. \n \nassistant", + "guess_lang": "txt", + "bbox": [ + 186, + 598, + 712, + 694 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "To train models with our EMPO for free-form natural reasoning tasks, we adopt the following reasoning prompt template similar to that we used in mathematical tasks for training.", + "bbox": [ + 171, + 708, + 823, + 738 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "Free-form Natural Reasoning Training Template", + "text_level": 1, + "bbox": [ + 187, + 747, + 531, + 762 + ], + "page_idx": 12 + }, + { + "type": "code", + "sub_type": "code", + "code_caption": [], + "code_body": "system \nReason step by step, and output your final answer within \\boxed{}?. \n \nuser \n{Question} Reason step by step and output the final answer within \\boxed{}?. \n \nassistant", + "guess_lang": "txt", + "bbox": [ + 186, + 772, + 689, + 869 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "Since the MMLU-Pro and GPQA are both close-formed multi-choice benchmark. 
To evaluate the natural reasoning capability of the models, we use the following prompt template during testing.", + "bbox": [ + 171, + 882, + 823, + 912 + ], + "page_idx": 12 + }, + { + "type": "page_number", + "text": "13", + "bbox": [ + 490, + 935, + 506, + 946 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "MMLU Pro Test Template for Base Models", + "text_level": 1, + "bbox": [ + 189, + 92, + 491, + 107 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "Question: {Question} Reason step by step and output the final answer (the correct letter choice from A-P) within \\boxed{}}.", + "bbox": [ + 187, + 116, + 808, + 143 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "Answer:", + "bbox": [ + 187, + 145, + 246, + 157 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "Few Shot MMLU Pro Test Template", + "text_level": 1, + "bbox": [ + 187, + 208, + 444, + 223 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "Question: {Question in Demonstration 1} Reason step by step and output the final answer (the correct letter choice from A-P) within \\boxed{}", + "bbox": [ + 187, + 232, + 808, + 261 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "Answer: Let's reason step by step. CoT of Demonstration 1 Therefore, the correct answer is Answer of Demonstration 1.", + "bbox": [ + 187, + 261, + 808, + 287 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "(Omit more demonstrations for readability)", + "bbox": [ + 187, + 297, + 473, + 316 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "... 
Question: {Question} Reason step by step and output the final answer (the correct letter choice from A-P) within \\boxed{}}.", + "bbox": [ + 187, + 324, + 808, + 359 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "Answer:", + "bbox": [ + 187, + 359, + 246, + 369 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "MMLU Pro Test Template for Finetuned Models (SFT and RL)", + "text_level": 1, + "bbox": [ + 187, + 421, + 630, + 436 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "system \nReason step by step, and output your final answer (the correct letter choice from A-P) within \n\\boxed{}", + "bbox": [ + 187, + 446, + 808, + 488 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "", + "bbox": [ + 187, + 488, + 294, + 501 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "user", + "bbox": [ + 187, + 502, + 295, + 513 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "{Question} Reason step by step and output the final answer (the correct letter choice from A-P) within \\boxed{}", + "bbox": [ + 187, + 515, + 808, + 542 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "", + "bbox": [ + 187, + 542, + 263, + 556 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "assistant", + "bbox": [ + 187, + 556, + 323, + 570 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "GPQA Test Prompt for Base Models", + "text_level": 1, + "bbox": [ + 187, + 622, + 444, + 636 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "Question: {Question} Reason step by step and output the final answer (A, B, C, or D) within \\boxed{}", + "bbox": [ + 187, + 646, + 808, + 674 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "Answer:", + "bbox": [ + 187, + 675, + 246, + 686 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "Few Shot GPQA Test Prompt for Base Models", + "text_level": 1, + "bbox": [ + 187, + 738, + 513, + 753 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "Question: 
{Question in Demonstration 1} Reason step by step and output the final answer (A, B, C, or D) within \\boxed{}", + "bbox": [ + 187, + 762, + 808, + 791 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "Answer: Let's reason step by step. CoT in Demonstration 1 Therefore, the correct final answer is Answer in Demonstration 1.", + "bbox": [ + 187, + 791, + 808, + 818 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "(Omit more demonstration for readability)", + "bbox": [ + 187, + 825, + 467, + 847 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "... Question: {Question} Reason step by step and output the final answer (A, B, C, or D) within \\boxed{}", + "bbox": [ + 187, + 854, + 808, + 887 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "Answer:", + "bbox": [ + 187, + 888, + 246, + 900 + ], + "page_idx": 13 + }, + { + "type": "page_number", + "text": "14", + "bbox": [ + 490, + 935, + 508, + 946 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "GPQA Test Prompt for Finetuned Models (SFT and RL)", + "text_level": 1, + "bbox": [ + 187, + 92, + 584, + 107 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "system", + "bbox": [ + 186, + 117, + 313, + 131 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "Reason step by step, and output your final answer (A, B, C, or D) within \\boxed{}", + "bbox": [ + 186, + 131, + 730, + 145 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "", + "bbox": [ + 187, + 145, + 263, + 157 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "user", + "bbox": [ + 187, + 160, + 295, + 172 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "{Question} Reason step by step and output the final answer (A, B, C, or D) within \\boxed{}", + "bbox": [ + 187, + 172, + 792, + 186 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "", + "bbox": [ + 187, + 186, + 264, + 199 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "assistant", + "bbox": [ + 
187, + 200, + 323, + 213 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "B Case Study", + "text_level": 1, + "bbox": [ + 171, + 244, + 303, + 262 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "Qwen2.5-Math-7B's Answer to the 18-th Question in AMC23", + "text_level": 1, + "bbox": [ + 187, + 281, + 617, + 296 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "Question: Janet rolls a standard 6-sided die 4 times and keeps a running total of the numbers she rolls. What is the probability that at some point, her running total will equal 3? The final answer can be written in the form $\\frac{m}{n}$ , where $m$ and $n$ are relatively prime positive integers. What is $m + n$ ? Response: To solve this problem, we need to determine the probability that at some point during 4 rolls of a standard 6-sided die, the running total will equal 3. We can use dynamic programming to keep track of the probabilities of each possible running total after each roll. Here's the plan:", + "bbox": [ + 186, + 305, + 808, + 404 + ], + "page_idx": 14 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "1. Initialize a list to keep track of the probability of each possible running total after each roll.", + "2. Start with the initial condition that the probability of a running total of 0 after 0 rolls is 1.", + "3. For each roll, update the probabilities of each possible running total by considering the outcomes of rolling a 1, 2, 3, 4, 5, or 6.", + "4. After each roll, check if the probability of a running total of 3 is non-zero.", + "5. The final answer will be the probability of a running total of 3 after 4 rolls. 
Let's implement this in Python:" + ], + "bbox": [ + 187, + 404, + 807, + 500 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "···", + "bbox": [ + 187, + 507, + 204, + 513 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "(We omit the python code for readability.)", + "bbox": [ + 187, + 513, + 465, + 527 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "···", + "bbox": [ + 187, + 534, + 204, + 540 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "It looks like there was an issue with the implementation. The probability of the running total being exactly 3 after 4 rolls should not be zero, so let's correct the approach and re-implement the solution. Here's the corrected plan:", + "bbox": [ + 187, + 541, + 807, + 582 + ], + "page_idx": 14 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "1. Initialize a list to keep track of the probability of each possible running total after each roll.", + "2. Start with the initial condition that the probability of a running total of 0 after 0 rolls is 1.", + "3. For each roll, update the probabilities of each possible running total by considering the outcomes of rolling a 1, 2, 3, 4, 5, or 6.", + "4. After each roll, check if the probability of a running total of 3 is non-zero.", + "5. The final answer will be the probability of a running total of 3 after 4 rolls." + ], + "bbox": [ + 187, + 583, + 807, + 665 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "Let's re-implement this in Python:", + "bbox": [ + 187, + 666, + 415, + 679 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "···", + "bbox": [ + 187, + 688, + 204, + 694 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "(We omit the python code for readability.)", + "bbox": [ + 187, + 693, + 465, + 707 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "It seems there is still an issue with the implementation. 
Let's manually check the logic and correct it step by step.", + "bbox": [ + 187, + 718, + 807, + 747 + ], + "page_idx": 14 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "1. Initialize the probability list with the initial condition that the probability of a running total of 0 after 0 rolls is 1.", + "2. For each roll, update the probabilities of each possible running total by considering the outcomes of rolling a 1, 2, 3, 4, 5, or 6.", + "3. After each roll, check if the probability of a running total of 3 is non-zero.", + "4. The final answer will be the probability of a running total of 3 after 4 rolls. Let's manually simulate the process: - After 0 rolls: Probability of 0 is 1" + ], + "bbox": [ + 187, + 748, + 807, + 845 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "···", + "bbox": [ + 187, + 852, + 202, + 857 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "The probability of a running total of 3 after 4 rolls is $4 / 1296$ , which simplifies to $1 / 324$ . Therefore, the final answer in the form of $m / n$ is $1 / 324$ , and the sum $m + n$ is $1 + 324 = 325$ . The final answer is $\\boxed{325}$ .", + "bbox": [ + 187, + 858, + 807, + 902 + ], + "page_idx": 14 + }, + { + "type": "page_number", + "text": "15", + "bbox": [ + 490, + 935, + 506, + 946 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "C Implementation Details about Semantic Clustering", + "text_level": 1, + "bbox": [ + 171, + 89, + 635, + 108 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "We detail the implementation of semantic clustering in our EMP0. The semantic cluster process is shown as Algorithm 1. 
We also provide the simplified python code to show how we determine whether two model outputs are equivalent or not in mathematical reasoning (Algorithm 2) and free-form natural reasoning tasks (Algorithm 3).", + "bbox": [ + 169, + 118, + 826, + 176 + ], + "page_idx": 15 + }, + { + "type": "code", + "sub_type": "code", + "code_caption": [ + "Algorithm 1: Semantic Clustering" + ], + "code_body": "Require : question $q$ , a group set of model response $\\{o_2,\\dots,o_G\\}$ , verifier $\\nu$ Initialize: $C = \\{o_1\\}$ \nfor $2\\leq i\\leq G$ do \nfor $c\\in C$ do // Random choose one element from $c$ for comparison $o_c = c[0]$ // Is the meaning of old sequence equivalent to new one? if $\\mathcal{V}(q,o_c,o_i) ==$ True then // Put into existing class $c = c\\cup \\{o_i\\}$ break \nend \nend \n// $o_i$ is semantically distinct, belongs to a novel cluster. \n $C\\gets C\\cup \\{o_i\\}$ \nend \nReturn :C", + "guess_lang": "verilog", + "bbox": [ + 158, + 209, + 707, + 435 + ], + "page_idx": 15 + }, + { + "type": "code", + "sub_type": "code", + "code_caption": [ + "Algorithm 2: Implementation of verifier for mathematical reasoning tasks." + ], + "code_body": "from math_VERIFY import parse, verify \ndef are_equivalent (model_output_1, model_output_2) prediction_1 $=$ parse(model_output_1) prediction_2 $=$ parse(model_output_2) return verify(prediction_1,prediction_2)", + "guess_lang": "python", + "bbox": [ + 184, + 483, + 712, + 563 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "D Additional Results on Llama3 Model Series", + "text_level": 1, + "bbox": [ + 171, + 590, + 573, + 607 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "We conduct additional experiments to validate the efficacy of our EMP0 on other model series beyond Qwen2.5. The results are shown in Table 3. 
Consistent with other concurrent practice, we are unable to implement R1-Zero-like training on the Llama series, i.e., directly initializing RL process from the Base model without SFT). Thus, we instead consider a semi-supervised learning approach by initializing from instruct-tuned model and enhance the reasoning capability with our EMP0. As shown in Table 3, when initialize from Llama3.2-3B-Instruct model, our EMP0 can also substantially improve reasoning capability of instruct-tuned model which have undergone carefully-designed post-training.", + "bbox": [ + 169, + 621, + 826, + 720 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "Why Qwen2.5 Base model can initialize fully unsupervised RL training, while Llama3 can not?", + "text_level": 1, + "bbox": [ + 169, + 724, + 823, + 739 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "Consistent with open-source community practices, we found that R1-Zero-like RL training can only be reproduced unsupervised on Qwen2.5 series Base models. In contrast, Llama3 series model still necessitate \"cold-start\", i.e., SFT, before RL. Specifically, in our experiments, the Qwen2.5 Base models demonstrated inherent answer consistency from the initial stages of EMPO training. However, Llama3 series Base models suffer severe inconsistency and fail to convergence during training. We hypothesize this divergence stems from Qwen2.5's pretraining strategy. As mentioned in the technical report [5], the pretrain data corpus are mixed with both web text and QA pairs generated by instruct-tuned Qwen2 models. This endows Qwen2.5 Base models with native instruction-following capabilities. Experimental evidence supports this hypothesis. 
As shown in Table 2, Qwen2.5 Base models successfully follow the instruction such as \"put the final answer (A-P) within box\" when answering multiple-choice questions from MMLU Pro and achieve an accuracy notably higher than random guess.", + "bbox": [ + 169, + 744, + 826, + 912 + ], + "page_idx": 15 + }, + { + "type": "page_number", + "text": "16", + "bbox": [ + 490, + 935, + 509, + 946 + ], + "page_idx": 15 + }, + { + "type": "code", + "sub_type": "code", + "code_caption": [ + "Algorithm 3: Implementation of verifier for natural reasoning tasks." + ], + "code_body": "{\n verifier = AutoModelForCausalLM.from_pretrained(...);\n tokenizer = AutoTokenizer.from_pretrained(...);\n}\ndef are_equivalent(model_output_1, model_output_2, question, verifier)\n prediction_1 = parse(model_output_1)\n prediction_2 = parse(model_output_2)\n prompt = (\n f\"User: ## Question: {question}\\n\\n\"\n f\"## Ground Truth Answer: {prediction_1}\\n\\n\"\n f\"## Student Answer: {prediction_2}\\n\\n\"\n \"For the above question, please verify if the student's answer is equivalent to the ground truth answer.\\n\"\n \"Do not solve the question by yourself; just check if the student's answer is equivalent to the ground truth answer.\\n\"\n \"If correct, output Final Decision: Yes\".\n \"If incorrect, output Final Decision: No\\..\\n\"\n \"Assistant: Final Decision: \"\n )\n inputs = selftokenizer(modified_prompt, return_tensors=\"pt\").to(self.model_device)\n input_ids = inputs-input_ids\n # inference for output logits\n with torch.inference_mode():\n outputs = self.model.forward(input_ids)\n logits = outputs.logits\n # get next output logits\n next_token_logits = logits[0, input_ids.shape[1] - 1, :]\n # get the token ID of \"Yes\" and \"No\"\n decision_tokens = selftokenizer(\"Yes\", \"No\")\n yes_id = decision_tokens.input_ids[0]\n no_id = decision_tokens.input_ids[1]\n # calculate probability\n probs = torch softmax(next_token_logits, dim=0)\n yes_prob = probs[yes_id].item()\n no_prob = 
probs[no_id].item()\n return yes_prob > no_prob", + "guess_lang": "python", + "bbox": [ + 181, + 207, + 861, + 806 + ], + "page_idx": 16 + }, + { + "type": "page_number", + "text": "17", + "bbox": [ + 490, + 935, + 508, + 946 + ], + "page_idx": 16 + }, + { + "type": "table", + "img_path": "images/60a4a919becdc853bd38aaa5ce700b90b4c33d2b4e994f1e89f907e7218a2031.jpg", + "table_caption": [ + "Table 3: Accuracy on mathematical reasoning benchmarks." + ], + "table_footnote": [], + "table_body": "
SupervisionMATHMinerva MathOMNIAIME24AMC23Avg.
frontier model
Llama-3.1-70B-Instruct{q,r,a}64.635.331.916.730.135.7
Eurus-2-7B-PRIME{q,r,a}79.238.642.126.757.848.9
1B model
Llama3.2-InstructNone27.25.15.60.010.09.6
Llama3.2-Instruct w/GRPO{q,a}29.83.76.40.012.510.5
Llama3.2-Instruct w/EMPO{q}31.05.17.93.37.511.0
3B model
Llama3.2-InstructNone46.219.115.33.320.020.8
Llama3.2-Instruct w/GRPO{q,a}49.222.417.613.332.527.0
Llama3.2-Instruct w/EMPO{q}49.820.218.413.330.026.3
", + "bbox": [ + 173, + 108, + 828, + 282 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "E Additional Training Details", + "text_level": 1, + "bbox": [ + 171, + 313, + 439, + 330 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "We provide a brief summary of our training recipes in Table 4. Besides, we have release the code in the supplementary materials which contained the full training configurations for re-implementation.", + "bbox": [ + 169, + 349, + 826, + 380 + ], + "page_idx": 17 + }, + { + "type": "table", + "img_path": "images/bcab8cb78366e87f8dd8e4a498f6c241b7dd8324c9b304d720703a5b37b80b08.jpg", + "table_caption": [ + "Table 4: A brief summary of training recipes of Qwen2.5 Base models." + ], + "table_footnote": [], + "table_body": "
1.5B-Math7B-Math3B7B14B
Number of generations77121212
Learning rate3e-73e-73e-73e-73e-7
Max completion length2048204810241024768
Batch size per GPU12111
", + "bbox": [ + 269, + 421, + 730, + 502 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "F Computational Cost of Semantic Clustering", + "text_level": 1, + "bbox": [ + 169, + 546, + 576, + 565 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "Given the number of responses sampled per question $G$ (i.e., the group size) and the training dataset size $N$ , the time complexity of the clustering process is $O(G^2 \\times N)$ . In mathematical reasoning tasks, semantic clustering is implemented by regular expressions which do not involve notable computational cost. For natural reasoning tasks, we rely on an additional compact small language model. To evaluate the additional computational overhead introduced by semantic clustering in EMPO, we conducted comparative analyses of EMPO and GRPO in terms of total training duration and GPU memory utilization. The results of mathematical reasoning and natural reasoning are shown in Table 6, respectively. It is worthy to note that the 14B model experiments require slightly less computational time than the 7B model. This is because, in our 14B experiments, we reduced the batch size and maximum response length from 2 and 1024 to 1 and 768, respectively, compared to the 3B and 7B configurations. This adjustment was made to fit the limited GPU memory of one single $8 \\times \\mathrm{A}100$ 80G machine.", + "bbox": [ + 169, + 583, + 826, + 750 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "Table 5: Comparison of total runtime (measured as $8 \\times$ A100 GPU hours) and storage cost (measured by max total GPU memory (GiB) utilization) between GRPO and EMP0. The GPU Memory semantic cluster process requires minimal computation and storage.", + "bbox": [ + 169, + 768, + 826, + 811 + ], + "page_idx": 17 + }, + { + "type": "table", + "img_path": "images/2f2bfbad48ec71e771da0d9c1a85dcef5bdda49c28e044cbd20eadabff19c212.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
Qwen2.5-1.5B-MathQwen2.5-7B-Math
GPU HoursGPU MemGPU HoursGPU Mem
GRPO11.2240.48.5501.3
EMPO11.7208.28.7532.7
", + "bbox": [ + 295, + 816, + 702, + 893 + ], + "page_idx": 17 + }, + { + "type": "page_number", + "text": "18", + "bbox": [ + 490, + 935, + 509, + 946 + ], + "page_idx": 17 + }, + { + "type": "table", + "img_path": "images/1099887c44ad2b598c5b47017a4cafdffed2a0d290e926bdf6596db1d87f0f65.jpg", + "table_caption": [ + "Table 6: Comparison of total runtime (measured as $8 \\times$ A100 GPU hours) and storage cost (measured by total GPU memory (GiB) utilization) between GRPO and EMP0. The GPU Memory semantic cluster process requires minimal computation and storage." + ], + "table_footnote": [], + "table_body": "
Qwen2.5-3BQwen2.5-7BQwen2.5-14B
GPU HoursGPU MemGPU HoursGPU MemGPU HoursGPU Mem
GRPO9.5274.812.4508.611.0588.2
EMPO11.1286.914.6532.711.5541.1
", + "bbox": [ + 207, + 137, + 787, + 212 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "G Details of Prompt Collection", + "text_level": 1, + "bbox": [ + 171, + 234, + 449, + 252 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "For mathematical reasoning, we directly use 20,000 prompts randomly selected from Numina-Math-CoT. For free-form natural reasoning tasks, we adopt the prompts from Natural Reasoning5 by filtering out the questions with over-long prompt, reference answer. Besides, we use the response length of Llama3.3-70B-Instruct as a difficulty estimation metric, and filter out overly difficult samples with response lengths exceeding 4096 tokens. The data collection python code is demonstrated as follow:", + "bbox": [ + 169, + 265, + 826, + 335 + ], + "page_idx": 18 + }, + { + "type": "code", + "sub_type": "code", + "code_caption": [ + "Algorithm 4: Python code of data filtering in a huggingface-like style." + ], + "code_body": "from datasets import load_dataset \ndataset = load_dataset(\"facebook/Natural-Reasoning\") \nfiltered_dataset = dataset.filter( lambda x: (\n # no answer\n len(x[\"reference_answer\"])) > 0\n # over-long answer\n and len(x[\"reference_answer\"]} < 129\n # overly difficult questions\n and len(x[\"llamaresponses\"]} < 4096\n # over-long prompt\n and len(x[\"question\"]} < 512\n # proof-oriented\n and (\"prove\" not in x[\"question\"].lower())\n and (\"proof\" not in x[\"question\"].lower())\n)", + "guess_lang": "python", + "bbox": [ + 179, + 373, + 715, + 616 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "H Additional Result about Pass@k", + "text_level": 1, + "bbox": [ + 171, + 646, + 480, + 662 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "We provide additional visualization pass@k results of models trained with EMP0. The results are shown as follow. 
As shown in Figure H, the Base model consistently catch up with RL trained models when k is large.", + "bbox": [ + 169, + 676, + 823, + 719 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "I The Influence of Clustering Quality on the Performance of EMPO", + "text_level": 1, + "bbox": [ + 171, + 738, + 754, + 756 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "In our mathematical reasoning experiments, semantic clustering is achieved solely through regular expression matching without introducing additional models. Due to the naturally structured response formats in mathematical tasks, regular expression could accurately determine answer equivalence, resulting in relatively high clustering quality.", + "bbox": [ + 169, + 768, + 826, + 827 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "However, in more general free-form natural reasoning tasks where model responses are free-form much more diverse (e.g., matrix, numbers, a few lines of sentences/codes...), the clustering quality can impact EMPO's effectiveness. For instance, in our more early practice, we tried DeBERTa (a bert-like model with 300M parameters trained by microsoft) for semantic clustering. Due to", + "bbox": [ + 169, + 830, + 825, + 888 + ], + "page_idx": 18 + }, + { + "type": "page_footnote", + "text": "5https://huggingface.co/datasets/facebook/natural_reasoning", + "bbox": [ + 189, + 896, + 550, + 912 + ], + "page_idx": 18 + }, + { + "type": "page_number", + "text": "19", + "bbox": [ + 490, + 935, + 508, + 946 + ], + "page_idx": 18 + }, + { + "type": "image", + "img_path": "images/2f1ddd8ff1e96a17be6f46c9ff2bb458e712e643653068eb46e98c12091fa5e9.jpg", + "image_caption": [ + "Figure 5: Trend of pass@k accuracy on Math test-set." 
+ ], + "image_footnote": [], + "bbox": [ + 181, + 95, + 480, + 244 + ], + "page_idx": 19 + }, + { + "type": "image", + "img_path": "images/4db2f4e321e46330bce0ead90d1173456c617c799def72823b107ed1cd146436.jpg", + "image_caption": [ + "Figure 6: Trend of pass@k accuracy on OMNI test-set." + ], + "image_footnote": [], + "bbox": [ + 517, + 95, + 816, + 244 + ], + "page_idx": 19 + }, + { + "type": "text", + "text": "the poor quality of semantic clustering, our EMPO straggled to scale up and suffered from frequent reward hacking. Subsequently, by leveraging the general-verifier released by Tiger-Lab (a fine-tuned Qwen2.5-1.5B-Math model) for clustering, we successfully generalized EMPO to more general free-form reasoning tasks. Noted that even though this small language model undergoes supervised finetuning, it serves within our fully unsupervised framework as a fixed utility function for semantic comparison, rather than serving as an external supervisor for task-specific feedback. There are several fundamental difference between cluster model and the reward model used in supervised RL:", + "bbox": [ + 169, + 325, + 826, + 424 + ], + "page_idx": 19 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- The cluster model does not evaluate output correctness relative to input queries. It just provides pairwise comparisons between the model's own outputs. That is, it only provides binary answer about \"whether these two answers are the same?\" rather than \"which answer is better?\".", + "- The cluster model does not provide any guidance, such as gradient information or hints on how to refine the reasoning traces.", + "- Compared to reward model or human-verifier golden answers, it can be much easier to implement such a cluster model. For example, in mathematical reasoning tasks, only regular expressions are enough for clustering. In natural reasoning tasks, a finetuned Qwen2.5-1B model can provide high quality semantic cluster results." 
+ ], + "bbox": [ + 215, + 434, + 823, + 583 + ], + "page_idx": 19 + }, + { + "type": "text", + "text": "Essentially, this is related to the non-identifiability problem in statistical inference [44]. The issue of non-identifiability arises because multiple, distinct underlying states (potential \"truths,\" or more accurately, different reasoning pathways or different clusters of incorrect answers) could produce the same pattern of relational signals (i.e., the same semantic clustering results).", + "bbox": [ + 169, + 593, + 826, + 650 + ], + "page_idx": 19 + }, + { + "type": "page_number", + "text": "20", + "bbox": [ + 488, + 935, + 509, + 946 + ], + "page_idx": 19 + } +] \ No newline at end of file diff --git a/data/2025/2504_05xxx/2504.05812/2ebd62c4-e647-47e7-bb58-1c94267578a3_model.json b/data/2025/2504_05xxx/2504.05812/2ebd62c4-e647-47e7-bb58-1c94267578a3_model.json new file mode 100644 index 0000000000000000000000000000000000000000..d91af6befb8f48ea9090130ce0fe414676fbf04c --- /dev/null +++ b/data/2025/2504_05xxx/2504.05812/2ebd62c4-e647-47e7-bb58-1c94267578a3_model.json @@ -0,0 +1,3375 @@ +[ + [ + { + "type": "aside_text", + "bbox": [ + 0.023, + 0.256, + 0.061, + 0.708 + ], + "angle": 270, + "content": "arXiv:2504.05812v3 [cs.LG] 18 May 2025" + }, + { + "type": "title", + "bbox": [ + 0.186, + 0.123, + 0.813, + 0.174 + ], + "angle": 0, + "content": "Right Question is Already Half the Answer: Fully Unsupervised LLM Reasoning Incentivization" + }, + { + "type": "text", + "bbox": [ + 0.241, + 0.226, + 0.365, + 0.241 + ], + "angle": 0, + "content": "Qingyang Zhang" + }, + { + "type": "text", + "bbox": [ + 0.241, + 0.242, + 0.363, + 0.256 + ], + "angle": 0, + "content": "Tianjin University" + }, + { + "type": "text", + "bbox": [ + 0.458, + 0.227, + 0.536, + 0.24 + ], + "angle": 0, + "content": "Haitao Wu" + }, + { + "type": "text", + "bbox": [ + 0.437, + 0.242, + 0.557, + 0.256 + ], + "angle": 0, + "content": "Tianjin University" + }, + { + "type": "text", + 
"bbox": [ + 0.63, + 0.227, + 0.759, + 0.242 + ], + "angle": 0, + "content": "Changqing Zhang" + }, + { + "type": "text", + "bbox": [ + 0.634, + 0.242, + 0.755, + 0.256 + ], + "angle": 0, + "content": "Tianjin University" + }, + { + "type": "text", + "bbox": [ + 0.312, + 0.277, + 0.395, + 0.289 + ], + "angle": 0, + "content": "Peilin Zhao" + }, + { + "type": "text", + "bbox": [ + 0.301, + 0.291, + 0.405, + 0.303 + ], + "angle": 0, + "content": "Tencent AI Lab" + }, + { + "type": "text", + "bbox": [ + 0.579, + 0.277, + 0.658, + 0.289 + ], + "angle": 0, + "content": "Yatao Bian" + }, + { + "type": "text", + "bbox": [ + 0.54, + 0.291, + 0.697, + 0.303 + ], + "angle": 0, + "content": "Tencent AI Lab & NUS" + }, + { + "type": "title", + "bbox": [ + 0.46, + 0.34, + 0.538, + 0.357 + ], + "angle": 0, + "content": "Abstract" + }, + { + "type": "text", + "bbox": [ + 0.23, + 0.37, + 0.768, + 0.565 + ], + "angle": 0, + "content": "Existing methods to enhance the reasoning capability of large language models predominantly rely on supervised fine-tuning (SFT) followed by reinforcement learning (RL) on reasoning-specific data. These approaches critically depend on external supervisions—such as labeled reasoning traces, verified golden answers, or pre-trained reward models. In this work, we propose Entropy Minimized Policy Optimization (EMPO), which makes an early attempt at fully unsupervised LLM reasoning incentivization. By continuously minimizing the predictive entropy of LLMs on unlabeled questions in a latent semantic space, EMP0 achieves competitive performance compared to supervised counterparts on both mathematical and freeform natural reasoning tasks. Specifically, without any supervised signals, EMP0 boosts the accuracy of Qwen2.5-Math-7B Base from \\(30.7\\%\\) to \\(48.1\\%\\) on mathematical benchmarks and improves the accuracy of Qwen2.5-7B Base from \\(32.1\\%\\) to \\(50.1\\%\\) on MMLU-Pro. 
Primary experiments and analysis are also provided to interpret the effectiveness of EMP0. Code is available at this url." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.589, + 0.314, + 0.604 + ], + "angle": 0, + "content": "1 Introduction" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.619, + 0.501, + 0.799 + ], + "angle": 0, + "content": "Large language models (LLMs) have demonstrated exceptional potential in challenging tasks such as mathematical reasoning [1, 2, 3] and code generation [4]. A prevailing paradigm for training reasoning LLMs involves firstly performing supervised fine-tuning (SFT) and then reinforcement learning (RL), or iterative combinations of both, applied to reasoning-specific datasets after pretraining [5]. Unfortunately, these methods typically depend on large-scale reasoning datasets with various forms of supervised information, such as human-labeled reasoning traces, verified golden answers, or an additional pre-trained re" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.799, + 0.825, + 0.841 + ], + "angle": 0, + "content": "ward model. As a consequence, endowing LLMs with powerful reasoning capability through human experts is becoming increasingly time-consuming and costly, which greatly limits the scalability and broader adoption of reasoning models." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.847, + 0.827, + 0.904 + ], + "angle": 0, + "content": "To mitigate this, previous work employs self-consistency to construct pseudo data and deploy supervised finetuning for better performance [6]. However, the performance improvement is limited and under risks of model collapse [7]. 
Recent advancements, such as the pioneering work PFPO [8], frame the labeling of solutions as evaluation against test cases and then leverage self-consistency" + }, + { + "type": "image", + "bbox": [ + 0.527, + 0.64, + 0.803, + 0.756 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.509, + 0.761, + 0.825, + 0.788 + ], + "angle": 0, + "content": "Figure 1: Improvement of the proposed method on Qwen2.5-7B and Qwen2.5-7B-Math model." + }, + { + "type": "footer", + "bbox": [ + 0.172, + 0.923, + 0.228, + 0.937 + ], + "angle": 0, + "content": "Preprint." + } + ], + [ + { + "type": "text", + "bbox": [ + 0.171, + 0.092, + 0.825, + 0.19 + ], + "angle": 0, + "content": "to generate pseudo test cases. Despite the promising results, the proposed method still necessitates supervision from instruction finetuning data and supervision signals from the frontier LLMs to initialize the RL process. Another more recent work [9] introduces a two-stage framework to construct self-rewarding reasoning models using self-generated data followed by RL. Despite the superior performance, the proposed method relies on a ground-truth verifier to obtain self-correction reasoning traces by rejection sampling. These approaches inspire our exploration of a critical open question: How can we incentivize LLM reasoning capacities in a fully unsupervised manner?" 
+ }, + { + "type": "image", + "bbox": [ + 0.182, + 0.204, + 0.816, + 0.309 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.373, + 0.317, + 0.6, + 0.328 + ], + "angle": 0, + "content": "(a) Comparison of different RL methods" + }, + { + "type": "image", + "bbox": [ + 0.182, + 0.332, + 0.816, + 0.468 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.42, + 0.472, + 0.553, + 0.484 + ], + "angle": 0, + "content": "(b) Overview of EMPO" + }, + { + "type": "image_caption", + "bbox": [ + 0.171, + 0.502, + 0.825, + 0.6 + ], + "angle": 0, + "content": "Figure 2: Overview of the proposed method. (a) Previous method like PPO [10] or GRPO [11] typically relies on external supervised signals, e.g., a pretrained reward model or golden answers. (b) The proposed Entropy Minimized Policy Optimization (EMPO) samples a set of responses from the current policy model, and then builds semantic clusters according to their equivalence. By continuously minimizing the entropy at a meaning level, our method achieves competitive benchmark performance without any external supervision, i.e., rule-based reward, pre-defined test cases or an pre-trained reward model." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.621, + 0.827, + 0.83 + ], + "angle": 0, + "content": "Recent advanced DeepSeek-R1-Zero [12] demonstrates robust reasoning capabilities without dependency on SFT data. By directly initiating RL from the base model, DeepSeek-R1-Zero autonomously evolves sophisticated reasoning behaviors such as reflection and self-critic by exploring the reward signals provided by rule-based rewards. i.e., verified golden answers or an additional pre-trained reward model. Inspired by the success of DeepSeek-R1-Zero, our motivation is to devise a fully unsupervised approach for powerful reasoning capability. 
Specifically, we propose a novel reinforcement learning algorithm termed as Entropy Minimized Policy Optimization (EMP0), which incentivizes the reasoning capability of LLMs in a fully unsupervised manner by minimizing their predictive entropy in a latent semantic space. This method optimizes the model to favor reasoning traces yielding consistent answers, enhancing output reliability. The semantic entropy objective we propose to minimize is a well-established measurement of LLMs' uncertainty, which extends beyond mathematical reasoning to free-form question-answering tasks. We further introduce entropy thresholding to filter unreliable reasoning traces, stabilizing the unsupervised training process. Experiments on various tasks including mathematical reasoning and free-form natural reasoning are conducted to validate the proposed method. Our contributions are summarized as follows:" + }, + { + "type": "text", + "bbox": [ + 0.217, + 0.839, + 0.825, + 0.879 + ], + "angle": 0, + "content": "- We propose an effective and principled strategy called Entropy-Minimized Policy Optimization (EMPO) for incentivizing the reasoning capabilities of LLMs in a fully unsupervised manner." + }, + { + "type": "text", + "bbox": [ + 0.217, + 0.884, + 0.825, + 0.913 + ], + "angle": 0, + "content": "- We establish semantic entropy as a potent intrinsic reward signal for guiding LLM reasoning. Our analysis confirms a strong negative correlation between semantic entropy and model" + }, + { + "type": "list", + "bbox": [ + 0.217, + 0.839, + 0.825, + 0.913 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.936, + 0.504, + 0.948 + ], + "angle": 0, + "content": "2" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.23, + 0.092, + 0.825, + 0.121 + ], + "angle": 0, + "content": "accuracy, validating its efficacy as a robust, unsupervised optimization objective that drives models towards generating more consistent and reliable outputs." 
+ }, + { + "type": "text", + "bbox": [ + 0.217, + 0.13, + 0.828, + 0.228 + ], + "angle": 0, + "content": "- Experiments on both math reasoning tasks with deterministic golden answers and freeform natural reasoning tasks are conducted to validate the efficacy and versatility of EMP0. Additionally, we provide critical insights into EMP0's mechanism, demonstrating that its effectiveness stems from an enhanced ability to consistently select and prioritize strong, pre-existing reasoning pathways learned during pre-training, rather than instilling fundamentally new reasoning skills. This underscores EMP0's strength in efficiently eliciting and refining latent capabilities within base models." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.254, + 0.325, + 0.27 + ], + "angle": 0, + "content": "2 Related Work" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.289, + 0.828, + 0.496 + ], + "angle": 0, + "content": "Self-Supervised and Semi-Supervised Reasoning. To address the dependency on labeled data, several self-supervised and unsupervised methods have emerged. Huang et al. [6] propose a self-improvement framework where LLMs generate high-confidence answers using Chain-of-Thought (CoT) prompting and self-consistency, subsequently fine-tuning on these pseudo-labels. However, the performance gains are often limited, and there is a risk of model collapse, as noted in [7]. Recently, Patel et al. [13] apply self-improvement to web navigation tasks in WebArena, fine-tuning on synthetic data generated by the model itself. Li et al. [14] enhance long-context reasoning via SeaLong, sampling multiple outputs and optimizing with Minimum Bayes Risk. These methods, while reducing reliance on external labels, still involve supervised fine-tuning steps, contrasting with EMPO's fully unsupervised RL approach. 
A concurrent work, i.e., test-time reinforcement learning (TTRL) [15] directly obtains pseudo label by major voting and then conducts RL on test prompts at inference time, whereas our EMPO strictly maintains the separation between training and testing phases for ensuring that the model remains unexposed to any test prompts during training. Furthermore, while TTRL is currently limited to mathematical tasks, our approach is applicable to more general free-form reasoning tasks." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.517, + 0.827, + 0.726 + ], + "angle": 0, + "content": "Self-Rewarding and RL-based Reasoning. RL has become a prominent technique for enhancing LLM reasoning, often leveraging external or self-generated rewards. Yuan et al. [16] propose using the LLM itself via LLM-as-a-Judge prompting to provide rewards during training, reducing reliance on human feedback. Similarly, Xiong et al. [9] propose a two-stage self-rewarding framework for mathematical reasoning, generating data and applying RL with a ground-truth verifier for self-correction, achieving superior performance but requiring supervised signals. Jiao et al. [8] frame solution labeling as evaluation against test cases, yet still rely on instruction fine-tuning and frontier LLM signals for RL initialization. Wen et al. [17] introduce Entropy-Regularized Token-Level Policy Optimization (ETPO), augmenting RL with an entropy bonus to promote exploration, differing from EMP0's entropy minimization focus. Guo et al. [12] with DeepSeek-R1 demonstrate robust reasoning via RL from a base model, using rule-based rewards. Xi et al. [18] present \\(\\mathbb{R}^3\\), a reverse curriculum RL approach using outcome supervision to mimic process supervision benefits. Wang et al. [19] propose CREAM, which enforces consistency regularization between internal reward models during self-training. 
These methods highlight a spectrum of supervision levels, positioning EMP0 as unique in its fully unsupervised nature, leveraging semantic entropy as an internal reward." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.746, + 0.828, + 0.911 + ], + "angle": 0, + "content": "Entropy Minimization and Semantic Consistency. Entropy minimization is a well-established technique in semi-supervised and unsupervised learning, with roots in traditional machine learning. Grandvalet and Bengio [20] demonstrate that minimizing entropy on unlabeled data can improve classification accuracy by encouraging model confidence. Test-time adaptation methods like Tent [21] adapt models to new domains by minimizing entropy on test data, filling domain gaps without additional labels. More recent work, COME, [22] extends this principle to conservative entropy minimization for robust adaptation. These approaches highlight the potential of entropy minimization as an unsupervised objective, which EMP0 leverages for LLM reasoning by extending it to semantic entropy [23] in a latent space. Farquhar et al. [24] further validate semantic entropy's utility in detecting hallucinations, reinforcing its relevance. Kharitonov et al. [25] explore entropy minimization in emergent languages, finding it naturally aligns with successful communication, providing additional theoretical foundation for EMP0." + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.936, + 0.506, + 0.948 + ], + "angle": 0, + "content": "3" + } + ], + [ + { + "type": "title", + "bbox": [ + 0.173, + 0.09, + 0.274, + 0.105 + ], + "angle": 0, + "content": "3 Method" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.121, + 0.825, + 0.178 + ], + "angle": 0, + "content": "We propose an RL-based method to minimize the entropy of LLM generations in a latent semantic space for incentivizing its reasoning capability. 
We term our method Entropy-Minimized Policy Optimization (EMPO), which is devised in a fully unsupervised manner without any forms of external supervised information." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.192, + 0.308, + 0.206 + ], + "angle": 0, + "content": "3.1 Preliminaries" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.217, + 0.825, + 0.289 + ], + "angle": 0, + "content": "Recent advancements in reinforcement learning have demonstrated remarkable breakthroughs in enhancing the reasoning capabilities of LLMs. Taking the representative RL technique Group Relative Policy Optimization (GRPO) [11] used by DeepSeek-R1-Zero [12] as an example. GRPO first samples a group of outputs \\(\\{o_1, \\dots, o_G\\}\\) from the policy model \\(\\pi_{\\theta}\\) and then optimizes it by maximizing the following objective:" + }, + { + "type": "equation", + "bbox": [ + 0.261, + 0.293, + 0.826, + 0.355 + ], + "angle": 0, + "content": "\\[\n\\begin{array}{l} \\mathcal {J} _ {\\mathrm {G R P O}} = \\mathbb {E} _ {[ q \\sim P (Q), \\{o _ {i} \\} _ {i = 1} \\sim \\pi_ {\\theta (O | q)} ]} \\\\ \\left[ \\frac {1}{G} \\sum_ {i = 1} ^ {G} \\left(\\min \\left(A _ {i}, \\operatorname {c l i p} (1, 1 - \\epsilon , 1 + \\epsilon) A _ {i}\\right) - \\beta K L \\left(\\pi_ {\\theta} \\mid \\mid \\pi_ {r e f}\\right) \\right. \\right], \\tag {1} \\\\ \\end{array}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.359, + 0.827, + 0.422 + ], + "angle": 0, + "content": "where \\(\\beta\\) is a hyper-parameter which avoids the policy model to diverge too far away from the reference model \\(\\pi_{ref}\\). \\(\\epsilon\\) clips extreme advantages for stability. \\(G\\) is the number of samples in one group. \\(A_{i}\\) is the advantage computed by normalizing the rewards within each group as \\(A_{i} = \\frac{r_{i} - mean(\\{r_{1},\\cdots,r_{G}\\})}{std(r_{1},\\cdots,r_{G})}\\). 
In math reasoning task, the reward can be computed by predefined rules:" + }, + { + "type": "equation", + "bbox": [ + 0.375, + 0.428, + 0.826, + 0.461 + ], + "angle": 0, + "content": "\\[\nr _ {i} = \\left\\{ \\begin{array}{l l} 1 & \\text {i f v e r i f i e r} (o _ {i}, a) = \\text {T r u e} \\\\ 0 & \\text {o t h e r w i s e} \\end{array} , \\right. \\tag {2}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.466, + 0.827, + 0.482 + ], + "angle": 0, + "content": "where a verifier is used to determine the correctness of \\( o_i \\) by comparing it with the golden answer \\( a \\)." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.487, + 0.825, + 0.57 + ], + "angle": 0, + "content": "Unlike the above example, we consider fully unsupervised optimization settings where there are no golden answers to verify the correctness of model predictions. In this circumstance, we only have unlabeled reasoning problems \\( P(Q) \\). Such problems were freely raised by users during the deployment of LLMs. Given a pre-training LLM \\( \\pi_{\\theta} \\) parameterized by \\( \\theta \\), our goal is to enhance its reasoning ability by only utilizing the unlabeled user problems \\( \\{q_i\\}_{i=1}^n \\), which requests minimized cost of data collection." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.586, + 0.509, + 0.601 + ], + "angle": 0, + "content": "3.2 Semantic Entropy Minimization Objective" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.611, + 0.826, + 0.764 + ], + "angle": 0, + "content": "Entropy is a classical unsupervised objective in the traditional semi-supervised and unsupervised learning fields [20, 26]. Previous works in computer vision show that by continuously minimizing the entropy on unlabeled samples after pre-training, the classification accuracy of machine learning models can be significantly improved to fill the domain gaps [21, 22]. 
The basic intuition behind entropy minimization is that a robust model should not only fit labeled data well but also make confident and consistent predictions on unlabeled data. This principle encourages the model to avoid ambiguity and make decisive predictions, thereby enhances generalization. In this work, we choose semantic entropy [23] as our unsupervised optimization objective, which is a natural extension of classical Shannon entropy specified for large language models. Intuitively speaking, minimizing semantic entropy encourages the LLMs' outputs to be more consistent in semantic level rather than format, and thus the final answers are expected to be more reliable." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.77, + 0.826, + 0.866 + ], + "angle": 0, + "content": "Specifically, semantic entropy first samples a group of outputs \\(\\{o_1,\\dots ,o_G\\}\\) and then clusters the output sequences according to their meaning. That is, if two outputs share the same meaning (i.e., they are bidirectionally entailed), they should be merged into one same cluster in the semantic space. This can be done without notable computational cost by predefined rules such as N-gram, regular expressions or an additional small language model. Once built such a set of meaning clusters \\(\\{c\\}\\) in semantic space, we then approximate the probability over the meanings as the proportion of sampled answers as" + }, + { + "type": "equation", + "bbox": [ + 0.436, + 0.866, + 0.825, + 0.883 + ], + "angle": 0, + "content": "\\[\np \\left(c _ {j} \\mid x\\right) \\approx \\left| c _ {j} \\right| / G, \\tag {3}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.884, + 0.827, + 0.913 + ], + "angle": 0, + "content": "where \\( c_{j} \\in \\{c\\} \\) is the \\( j \\)-th meaning cluster. \\( |c_{j}| \\) denotes the numbers of outputs that belong to \\( c_{j} \\). 
Finally, given question \\( q \\), the semantic entropy (denoted as \\( H \\)) over the model's output meanings" + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.936, + 0.505, + 0.947 + ], + "angle": 0, + "content": "4" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.172, + 0.092, + 0.437, + 0.106 + ], + "angle": 0, + "content": "distribution can be estimated as follows" + }, + { + "type": "equation", + "bbox": [ + 0.386, + 0.108, + 0.826, + 0.142 + ], + "angle": 0, + "content": "\\[\nH = - \\sum_ {c _ {j} \\in \\{c \\}} p (c _ {j} | q) \\log p (c _ {j} | q). \\tag {4}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.146, + 0.827, + 0.203 + ], + "angle": 0, + "content": "As proven by previous work, semantic entropy has a strong negative relationship with model accuracy, which can be used as an efficient measurement to detect unreliable LLM generations such as confabulation and hallucination [23, 24]. Motivated by this, we propose to leverage semantic entropy as an unsupervised optimization objective for incentivizing the reasoning capability of LLM." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.217, + 0.496, + 0.232 + ], + "angle": 0, + "content": "3.3 Entropy-Minimized Policy Optimization" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.242, + 0.827, + 0.382 + ], + "angle": 0, + "content": "We propose Entropy-Minimized Policy Optimization (EMPO), an RL-based method that optimizes the pre-trained large language model \\(\\pi_{\\theta}\\) to favor low semantic entropy responses given unlabeled user questions \\(\\{q_i\\}_{i=1}^n\\). Given input questions, EMPO incentivizes the outputs that belong to higher probability meaning cluster, and thus minimizes the semantic entropy over the meaning distribution. 
Specifically, given a question \\(q\\), our EMPO first samples a group of output \\(\\{o_1, \\ldots, o_G\\}\\) from the current model \\(\\pi_{\\theta}\\) and then merges them into a set of \\(M\\) meaning clusters \\(\\{c_1, \\ldots, c_M\\}\\). As we mentioned before, this can be done without notable computational cost (please refer to the quantitative results in Appendix F) by predefined rules such as N-gram, regular expressions or an additional small language model (SLM)\\(^1\\). Once built such a meaning set, EMPO approximately minimizes the semantic entropy \\(H\\) by maximizing the following objective" + }, + { + "type": "equation", + "bbox": [ + 0.22, + 0.386, + 0.826, + 0.428 + ], + "angle": 0, + "content": "\\[\n\\mathcal {J} _ {\\mathrm {E M P O}} = \\mathbb {E} _ {[ \\{q \\} \\sim P (Q), \\{o _ {i} \\} _ {i = 1} ^ {G} \\sim \\pi_ {\\theta} (O | q) ]} \\frac {1}{| G |} \\sum_ {i = 1} ^ {| G |} \\left(A _ {i}\\right), A _ {i} = \\frac {r _ {i} - m e a n \\left(\\left\\{r _ {1} , \\cdots , r _ {G} \\right\\}\\right)}{\\operatorname {s t d} \\left(r _ {1} , \\cdots , r _ {G}\\right)} \\tag {5}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.431, + 0.827, + 0.486 + ], + "angle": 0, + "content": "where \\( A_{i} \\) is the advantage of output \\( o_{i} \\) calculated by normalizing the rewards. Unlike GRPO in which the rewards is calculated depending on external supervision such as pre-defined rules or an reward model, in EMP0, the reward assigned for the \\( i \\)-th outputs \\( o_{i} \\) is the likelihood of its meaning cluster, i.e.," + }, + { + "type": "equation", + "bbox": [ + 0.391, + 0.487, + 0.825, + 0.503 + ], + "angle": 0, + "content": "\\[\nr _ {i} = p \\left(c _ {j} \\mid q\\right), \\text {w h e r e} l \\left(o _ {i}\\right) = c _ {j}, \\tag {6}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.504, + 0.827, + 0.533 + ], + "angle": 0, + "content": "where the meaning likelihood \\( p(c_{j}|q) \\) is approximated by Eq. 3. 
Intuitively, the outputs convey higher-probability meanings are of higher advantages, and are therefore incentivized through training." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.538, + 0.827, + 0.689 + ], + "angle": 0, + "content": "How to Mitigate Potential Reward Hacking? Note that different from verifiable rule-based reward, which inherently resists reward hacking risks, optimizing unsupervised entropy objectives may permit trivial solutions. For instance, models could exploit the reward signal by overfitting to high-confident but wrong predictions for the most frequent semantic clusters without carefully reasoning process. To address this, we implement a straightforward entropy thresholding strategy, restricting optimization to prompts exhibiting moderate uncertainty via dual threshold criteria. Specifically, two entropy thresholds are deployed to filter out user queries \\( q \\) that result in overly high or low entropy unreliable answers. Extremely high entropy indicates that the model is highly uncertain, and thus its predictions are prone to be unreliable. In addition, continuously optimizing on responses with already low entropy is redundant and at the risk of overconfidence [27]. The final optimization objective of EMPO is" + }, + { + "type": "equation", + "bbox": [ + 0.326, + 0.692, + 0.573, + 0.711 + ], + "angle": 0, + "content": "\\[\n\\mathcal {J} _ {\\mathtt {E M P O}} = \\mathbb {E} _ {[ \\{q \\} \\sim P (Q), \\{o _ {i} \\} _ {i = 1} ^ {G} \\sim \\pi_ {\\theta} (O | q) ]}\n\\]" + }, + { + "type": "equation", + "bbox": [ + 0.369, + 0.713, + 0.826, + 0.761 + ], + "angle": 0, + "content": "\\[\n\\left[ \\frac {1}{| G |} \\sum_ {i = 1} ^ {| G |} \\left(\\min \\left(A _ {i}, \\operatorname {c l i p} (1, 1 - \\epsilon , 1 + \\epsilon) A _ {i}\\right) \\right. \\right], \\tag {7}\n\\]" + }, + { + "type": "equation", + "bbox": [ + 0.364, + 0.763, + 0.519, + 0.779 + ], + "angle": 0, + "content": "\\[\n\\mathrm {s . 
t .} \\delta_ {l o w} < H < \\delta_ {h i g h}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.782, + 0.827, + 0.853 + ], + "angle": 0, + "content": "where \\( H \\) is the semantic entropy defined in Eq. 4. The questions results in highly unreliable answers with entropy greater than \\( \\delta_{high} \\) are filtered out. Besides, we also filter out low-entropy answers to maintain the diversity of model outputs and further avoid potential reward hacking. Following previous work [28], we remove the KL constraint for better performance. \\( \\epsilon \\) clips extremely high or low advantages for stability similar to common practice." + }, + { + "type": "page_footnote", + "bbox": [ + 0.171, + 0.86, + 0.827, + 0.913 + ], + "angle": 0, + "content": "1Such a SLM does not provide explicit or direct supervision signals regarding the correctness or quality of reasoning for a given query. The \"unsupervised\" nature of EMP0 refers to its independence from labeled (query, correct-answer) pairs or (query, valid-reasoning-trajectory) pairs for learning the reasoning task itself. More discussions are in Appendix I." + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.936, + 0.505, + 0.948 + ], + "angle": 0, + "content": "5" + } + ], + [ + { + "type": "title", + "bbox": [ + 0.172, + 0.09, + 0.314, + 0.108 + ], + "angle": 0, + "content": "4 Experiments" + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.122, + 0.37, + 0.138 + ], + "angle": 0, + "content": "4.1 Experimental Settings" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.149, + 0.825, + 0.193 + ], + "angle": 0, + "content": "We conduct experiments on multiple datasets including both closed-form math reasoning tasks and free-form natural reasoning tasks. Our EMP0 shows competitive performance by purely RL in a fully unsupervised manner compared to supervised finetuning and RL methods." 
+ }, + { + "type": "text", + "bbox": [ + 0.171, + 0.197, + 0.827, + 0.337 + ], + "angle": 0, + "content": "Prompt Collection and Data Engineering. For mathematical reasoning, following the common practice [29, 8, 30], we adopt 20,000 prompts randomly selected from NuminaMath-CoT dataset [31] for training\\(^{2}\\) without additional data engineering. For free-form natural reasoning tasks, we adopt the prompts from Natural Reasoning\\(^{3}\\), a large-scale dataset consisting of diverse reasoning questions from multiple domains (e.g., Physics, Computer Science, Economics, Social Sciences and more). For training efficiency, we filter out the questions with over-long prompt or reference answer. Besides, taking inspiration from [32, 33, 34], we use the response length of Llama3.3-70B-Instruct as a difficulty estimation metric, and filter out samples with response lengths exceeding 4096 tokens. The remaining samples are simpler for stabilizing the training process. The final training subset is consisted of 18,000 questions. More details can be found in Appendix G." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.342, + 0.828, + 0.455 + ], + "angle": 0, + "content": "Evaluation. For mathematical reasoning, the performance is evaluated on a diverse suite of benchmarks including Minerva Math, MATH, AMC23, OlympaidBench and AIME24. The evaluation codebase is borrowed from the SimpleRL project [35], which is consistent with other concurrent works [30]. For free-form natural reasoning, we evaluate on MMLU-Pro [36] and GPQA [37] benchmarks, which consist of challenging reasoning-focused problems across various subjects, e.g., biology, business, chemistry, computer science and so on. We prompt the model to reason step by step and output the final answer within \"\\boxed{}\" and report the multi-choice accuracy. Without specific clarification, all evaluations are conducted using zero-shot prompting and greedy-decoding." 
+ }, + { + "type": "text", + "bbox": [ + 0.171, + 0.459, + 0.828, + 0.613 + ], + "angle": 0, + "content": "Model training. For mathematical reasoning tasks, we train Qwen2.5-Math-1.5B and 7B Base models with our EMP0. The baselines we consider include supervised finetuning (SFT), online direct preference optimization (ODPO) [30] and the representative GRPO. We also compared with Qwen2.5-Math Instruction models for a more comprehensive comparison, where the instruction model is trained by iteratively supervised finetuning and RL on private data. For free-form natural reasoning tasks, we initialize from Qwen2.5-3B, 7B and 14B Base models. Different from mathematical reasoning, it is difficult to adopt rule-based reward for free-form question-answering tasks without deterministic golden answers. We consider the corresponding Instruct model, the Base model with or without few-shot CoT prompt as baselines. Besides, we also compare with SFT where the Base model is tuned to fit the response of Llama3.3-70B-Instruct. For more results on other model families beyond the Qwen series (e.g., Llama3), please refer to the Appendix D." + }, + { + "type": "text", + "bbox": [ + 0.217, + 0.624, + 0.825, + 0.651 + ], + "angle": 0, + "content": "- SFT: We train models by supervised finetuning via Open-Instruct [38] with a fixed learning rate of \\(1 \\times 10^{-6}\\), a global batch size of 128 and train for 1 epoch with a max length of 2048." + }, + { + "type": "text", + "bbox": [ + 0.217, + 0.66, + 0.825, + 0.755 + ], + "angle": 0, + "content": "- GRPO: We implement GRPO viaTRL [39] based on Open-R1 [29]. We sample 7 and 12 responses for each prompt for mathematical and natural reasoning tasks respectively. We train the model for 3 epochs with a maximum generation length of 2048. Following [40], we only use the rule-based accuracy reward and do not adopt format-reward. 
The accuracy reward is implemented as follows: If the response contains the correct final answer within \"boxed{}\", it receives a reward of 1. If the model prediction is wrong, it receives a reward of 0. When there is no answer can be extracted from the model's response, the reward is \\(-0.5\\)." + }, + { + "type": "text", + "bbox": [ + 0.217, + 0.763, + 0.823, + 0.819 + ], + "angle": 0, + "content": "- Online-DPO: Recent advanced Online-DPO first samples a set of responses and then verifies and selects the responses with highest reward and lowest reward as a preference pair. We directly copy the results from [30], where the model is trained for 7 iterations. Each iteration involves 2 training epochs and 20K training samples, i.e., 140K training samples in total." + }, + { + "type": "text", + "bbox": [ + 0.217, + 0.827, + 0.823, + 0.868 + ], + "angle": 0, + "content": "- EMP0: Most hyper-parameters of our method, e.g., number of generations, max generation length, batch size, learning rate are the same with GRPO. In mathematical reasoning tasks, we use a set of regular expressions to merge the outputs into meaning clusters. 
For" + }, + { + "type": "list", + "bbox": [ + 0.217, + 0.624, + 0.825, + 0.868 + ], + "angle": 0, + "content": null + }, + { + "type": "page_footnote", + "bbox": [ + 0.191, + 0.884, + 0.572, + 0.898 + ], + "angle": 0, + "content": "\\(^{2}\\)https://huggingface.co/datasets/RLHFlow/numa_prompt_dpo1" + }, + { + "type": "page_footnote", + "bbox": [ + 0.196, + 0.899, + 0.551, + 0.911 + ], + "angle": 0, + "content": "3https://huggingface.co/datasets/facebook/natural_reasoning" + }, + { + "type": "list", + "bbox": [ + 0.191, + 0.884, + 0.572, + 0.911 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.937, + 0.505, + 0.948 + ], + "angle": 0, + "content": "6" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.23, + 0.092, + 0.827, + 0.176 + ], + "angle": 0, + "content": "more general free-form natural reasoning, we leverage General-Verifier\\(^4\\) (a compact small language model with 1.5B parameters) to determine whether two outputs are of the same meaning or not following [23, 24]. A concrete example can be found in Appendix C. Specifically, if the final predictions (i.e., the contents within \"\\boxed{}\") of two model outputs are bidirectionally implicating, then we merge them into one semantic cluster ignoring their reasoning traces. More details are in Appendix E." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.192, + 0.307, + 0.206 + ], + "angle": 0, + "content": "4.2 Main Results" + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.217, + 0.563, + 0.234 + ], + "angle": 0, + "content": "4.2.1 Performance on Mathematical Reasoning Tasks." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.241, + 0.828, + 0.353 + ], + "angle": 0, + "content": "We conduct experiments on mathematical tasks to evaluate our method. The main results are shown in Table 1. EMP0 has successfully incentivized the Qwen2.5-Math Base model with reasoning capability without dependency on any external supervision. 
We observe a substantial improvement in the average performance on commonly used mathematical reasoning benchmarks from \\(28.1\\%\\) to \\(42.1\\%\\) and \\(30.7\\%\\) to \\(48.1\\%\\) on 1.5B and 7B models, respectively. Notably, through fully unsupervised RL training, the 1.5B and 7B model has both achieved competitive performance (42.1% and \\(48.1\\%\\)) near to Qwen2.5-Math-Instruct (40.5% and \\(49.4\\%\\)), where the latter depends on private dataset and multi-stage iteratively supervised fine-tuning and reinforcement learning." + }, + { + "type": "table_caption", + "bbox": [ + 0.171, + 0.364, + 0.825, + 0.408 + ], + "angle": 0, + "content": "Table 1: Accuracy on mathematical reasoning benchmarks. We report the pass@1 accuracy tested with greedy decoding. The results of ODPO are directly copied from [30]. Here \\( q, r, a \\) denote the dependency on questions, human-verified reasoning traces and golden answers respectively." + }, + { + "type": "table", + "bbox": [ + 0.172, + 0.413, + 0.83, + 0.649 + ], + "angle": 0, + "content": "
SupervisionMATHMinerva MathOlympiad BenchAIME24AMC23Avg.
frontier model
Llama-3.1-70B-Instruct{q,r,a}64.635.331.916.730.135.7
Eurus-2-7B-PRIME{q,r,a}79.238.642.126.757.848.9
1.5B model
Qwen2.5-MathNone52.210.725.210.042.528.1
Qwen2.5-Math-Instruct{q,r,a}73.830.938.76.752.540.5
Qwen2.5-Math w/SFT{q,r,a}61.826.127.13.337.531.2
Qwen2.5-Math w/GRPO{q,a}75.232.033.616.752.542.0
Qwen2.5-Math w/EMPO{q}73.032.436.613.355.042.1
7B model
Qwen2.5-MathNone64.815.126.76.740.030.7
Qwen2.5-Math Instruct{q,r,a}82.843.841.216.762.549.4
Qwen2.5-Math w/SFT{q,r,a}72.234.633.210.045.039.0
Qwen2.5-Math w/ODPO{q,a}76.830.937.926.762.547.0
Qwen2.5-Math w/GRPO{q,a}77.839.739.120.057.546.8
Qwen2.5-Math w/EMPO{q}78.040.437.320.065.048.1
" + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.671, + 0.593, + 0.687 + ], + "angle": 0, + "content": "4.2.2 Performance on Natural Free-form Reasoning Tasks." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.695, + 0.827, + 0.793 + ], + "angle": 0, + "content": "We present the results on free-form natural reasoning tasks in Table 2. On the MMLU-Pro benchmark, our EMP0 improves the accuracy from \\(32.1\\%\\) to \\(50.1\\%\\) and \\(32.7\\%\\) to \\(58.8\\%\\) on Qwen2.5-7B and 14B Base model respectively. Besides, on more challenging GPQA benchmark, EMP0 results in increasing accuracy from \\(15.9\\%\\) to \\(28.8\\%\\) on 7B model, \\(30.6\\%\\) to \\(35.3\\%\\) on 14B model. Notably, we observe that the SFT baseline fails to consistently improve model performance. We hypothesize that this is due to the noise in the reference responses within the Natural Reasoning training data (as mentioned by [32]). This phenomenon further underscores the practical potential of our proposed method." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.807, + 0.36, + 0.823 + ], + "angle": 0, + "content": "4.2.3 Training Dynamics" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.831, + 0.825, + 0.888 + ], + "angle": 0, + "content": "We further conduct experiments to investigate the reliability of our unsupervised reward signals. As shown in Figure 3, the unsupervised reward signals of EMP0 have a strongly negative correlation with the true rewards based on golden answers. Thus, by continuously minimizing the semantic entropy objective, the model can boost its accuracy in a fully unsupervised manner." 
+ }, + { + "type": "page_footnote", + "bbox": [ + 0.191, + 0.897, + 0.503, + 0.912 + ], + "angle": 0, + "content": "4https://huggingface.co/TIGER-Lab/general-verifier" + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.936, + 0.505, + 0.948 + ], + "angle": 0, + "content": "7" + } + ], + [ + { + "type": "table_caption", + "bbox": [ + 0.171, + 0.088, + 0.825, + 0.131 + ], + "angle": 0, + "content": "Table 2: Accuracy results on free-form natural reasoning benchmarks. We report pass@1 accuracy tested with greedy decoding. Here \\(\\{q,r,a\\}\\) denote the dependency on questions, human-verified reasoning traces and verifiable golden answers respectively." + }, + { + "type": "table", + "bbox": [ + 0.188, + 0.137, + 0.812, + 0.436 + ], + "angle": 0, + "content": "
SupervisionMMLU ProGPQA
STEMHumanitiesSocialOtherAvg.
3B model
Qwen2.5-Base-8.325.357.424.156.8311.2
Qwen2.5-Base 5-shot{q,r,a}34.726.247.935.935.313.8
Qwen2.5-Instruct{q,r,a}44.830.756.047.144.528.2
Qwen2.5-Base w/SFT{q,r,a}19.810.428.018.419.111.5
Qwen2.5-Base w/GRPO{q,a}32.227.749.838.735.217.1
Qwen2.5-Base w/EMPO{q}31.726.248.136.734.120.6
7B model
Qwen2.5-Base-30.123.845.934.332.115.9
Qwen2.5-Base 5-shot{q,r,a}45.736.359.149.446.823.5
Qwen2.5-Instruct{q,r,a}56.938.164.158.655.235.3
Qwen2.5-Base w/SFT{q,r,a}32.67.115.830.125.622.4
Qwen2.5-Base w/GRPO{q,a}57.136.264.456.654.533.8
Qwen2.5-Base w/EMPO{q}52.434.659.050.950.128.8
14B model
Qwen2.5-Base-30.828.044.433.032.730.6
Qwen2.5-Base 5-shot{q,r,a}51.935.863.454.451.433.2
Qwen2.5-Instruct{q,r,a}63.647.173.866.762.942.9
Qwen2.5-Base w/SFT{q,r,a}37.027.840.238.036.128.5
Qwen2.5-Base w/GRPO{q,a}62.942.168.659.859.635.6
Qwen2.5-Base w/EMPO{q}61.441.668.360.058.835.3
" + }, + { + "type": "image", + "bbox": [ + 0.177, + 0.449, + 0.385, + 0.584 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.387, + 0.449, + 0.6, + 0.584 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.603, + 0.449, + 0.818, + 0.584 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.17, + 0.596, + 0.825, + 0.68 + ], + "angle": 0, + "content": "Figure 3: We visualize the training dynamics when tuning Qwen2.5-Math-7B Base model with EMP0 on 20K prompts randomly sampled from NuminaMath-CoT. The left illustrates the running average of semantic entropy (Eq. 4). The middle shows the trend of our unsupervised reward as defined by Eq. 6. The right shows the model accuracy on training data at each RL steps. Along the unsupervised RL-based training trajectory, EMP0 establishes a stable learning process with consistently decreased semantic entropy and improved accuracy." + }, + { + "type": "title", + "bbox": [ + 0.171, + 0.703, + 0.825, + 0.74 + ], + "angle": 0, + "content": "5 Discussion and Conclusion: The Role of Unsupervised Learning in Eliciting Pre-Trained Reasoning Capabilities" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.753, + 0.825, + 0.837 + ], + "angle": 0, + "content": "The strong empirical performance of EMP0, particularly its ability as a fully unsupervised method to match or even slightly outperform supervised counterparts like GRPO (as observed with the 7B model), prompts a deeper examination of how such reasoning incentivization mechanisms work. This is especially pertinent given the counterintuitive observation that these substantial improvements on benchmarks are achieved without a consistent increase in response length or clear evidence of an \"Aha moment\" – a hypothesized sudden emergence of enhanced reasoning capabilities." 
+ }, + { + "type": "text", + "bbox": [ + 0.17, + 0.842, + 0.827, + 0.913 + ], + "angle": 0, + "content": "To dissect the nature of the improvements conferred by reinforcement learning (RL) post-training, we investigated its influence on pass@k accuracy. This metric is crucial as recent studies [41, 42] suggest that RL may not fundamentally expand the inherent reasoning capacities of LLMs beyond those already embedded in their pre-trained base. As depicted in Figure 4, our findings align with this perspective. Both GRPO and EMP0 significantly enhance pass@k scores for small to moderate" + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.936, + 0.504, + 0.948 + ], + "angle": 0, + "content": "8" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.179, + 0.09, + 0.498, + 0.263 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.498, + 0.09, + 0.81, + 0.262 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.17, + 0.28, + 0.825, + 0.364 + ], + "angle": 0, + "content": "Figure 4: Pass@k curves of Qwen2.5-Math-7B Base model and its counterparts trained with GRPO and our EMP0 on Minerva Math and OMNI reasoning benchmarks. Pass@k measures the probability that at least 1 of the top \\( k \\) generated solutions is correct. Pass@1 is equivalent to accuracy, as it checks if the single solution is correct. When \\( k \\) is small, RL-trained models outperform the original base model. However, as \\( k \\) increases (e.g., into the tens or hundreds), the performance of the base models often converges with, or even exceeds, that of the RL-trained models." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.402, + 0.825, + 0.458 + ], + "angle": 0, + "content": "values of k (e.g., \\(k = 16\\) or 32) compared to the base model. This demonstrates an improved efficiency in surfacing correct reasoning paths with fewer attempts. 
However, as k becomes substantially large, the performance of these RL-trained models tends to converge with, and is sometimes surpassed by, that of the base model." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.463, + 0.827, + 0.575 + ], + "angle": 0, + "content": "This convergence at high \\( k \\) values, coupled with our qualitative observations that the base models themselves already exhibit sophisticated reasoning behaviors such as pausing, self-correction, and backtracking (see Appendix for examples), strongly indicates that the foundational reasoning pathways are largely pre-existing. Consequently, RL post-training, whether supervised or unsupervised like EMP0, appears to primarily refine the model's ability to efficiently access, prioritize, and consistently select these latent reasoning patterns, rather than instilling fundamentally novel ones. The observed improvements in pass@1 (accuracy) are thus likely a consequence of this enhanced sampling efficiency." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.581, + 0.825, + 0.706 + ], + "angle": 0, + "content": "These empirical insights from the pass@k analysis lend considerable support to the emerging consensus that pre-training shoulders the primary burden of endowing LLMs with their core abilities. We align our interpretation with prior insights from [43]: \"Pretraining does all the hard work. One big bet is that the pretraining phase grants all the abilities to the base LM, and finetuning is simply like a style transfer which positions the model to the right output space.\" Under this conjecture (or more precisely, an emerging, but not yet unanimously accepted consensus [41]), we attribute the efficacy of our method to the robust pretraining process of the Qwen2.5 Base model: If a base model possesses strong inherent reasoning capabilities, the subsequent challenge is not necessarily to teach it new reasoning skills from scratch, but rather to effectively elicit and guide these existing skills." 
+ }, + { + "type": "text", + "bbox": [ + 0.17, + 0.712, + 0.825, + 0.795 + ], + "angle": 0, + "content": "EMPO's success highlights that intrinsic reward signals, derived purely from the model's objective to minimize semantic entropy and thus achieve greater consistency in its outputs, can be surprisingly potent for this elicitation process. In a well-pre-trained model, outputs that are semantically consistent are more likely to align with correct and coherent reasoning. EMPO leverages this by incentivizing the model to favor such consistent outputs, effectively guiding it to refine its selection from its collection of existing reasoning strategies without requiring external validation of correctness." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.801, + 0.827, + 0.912 + ], + "angle": 0, + "content": "In conclusion, while RL techniques, including EMP0, may not be forging entirely new fundamental reasoning capabilities beyond what pre-training provides, their role in significantly enhancing the sampling efficiency and reliability of accessing these pre-trained abilities is of paramount practical importance. Optimizing models for such efficiency is crucial for real-world applications. EMP0, by achieving this through a fully unsupervised framework, offers a particularly scalable, cost-effective, and practical approach to unlocking and refining the vast reasoning potential embedded within pre-trained LLMs, especially in domains where curated supervisory data is scarce or prohibitively expensive to obtain." + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.936, + 0.505, + 0.948 + ], + "angle": 0, + "content": "9" + } + ], + [ + { + "type": "title", + "bbox": [ + 0.174, + 0.09, + 0.269, + 0.106 + ], + "angle": 0, + "content": "References" + }, + { + "type": "ref_text", + "bbox": [ + 0.182, + 0.115, + 0.826, + 0.159 + ], + "angle": 0, + "content": "[1] Xinyu Guan, Li Lyna Zhang, Yifei Liu, Ning Shang, Youran Sun, Yi Zhu, Fan Yang, and Mao Yang. 
rstar-math: Small llms can master math reasoning with self-evolved deep thinking. arXiv preprint arXiv:2501.04519, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.182, + 0.161, + 0.827, + 0.204 + ], + "angle": 0, + "content": "[2] Ganqu Cui, Lifan Yuan, Zefan Wang, Hanbin Wang, Wendi Li, Bingxiang He, Yuchen Fan, Tianyu Yu, Qixin Xu, Weize Chen, et al. Process reinforcement through implicit rewards. arXiv preprint arXiv:2502.01456, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.207, + 0.827, + 0.248 + ], + "angle": 0, + "content": "[3] Lifan Yuan, Wendi Li, Huayu Chen, Ganqu Cui, Ning Ding, Kaiyan Zhang, Bowen Zhou, Zhiyuan Liu, and Hao Peng. Free process rewards without process labels. arXiv preprint arXiv:2412.01981, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.253, + 0.825, + 0.282 + ], + "angle": 0, + "content": "[4] Dejian Yang Daya Guo, Qihao Zhu. Deepseek-coder: When the large language model meets programming – the rise of code intelligence, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.285, + 0.825, + 0.327 + ], + "angle": 0, + "content": "[5] An Yang, Beichen Zhang, Binyuan Hui, Bofei Gao, Bowen Yu, Chengpeng Li, Dayiheng Liu, Jianhong Tu, Jingren Zhou, Junyang Lin, et al. Qwen2. 5-math technical report: Toward mathematical expert model via self-improvement. arXiv preprint arXiv:2409.12122, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.33, + 0.825, + 0.36 + ], + "angle": 0, + "content": "[6] Jiaxin Huang, Shixiang Shane Gu, Le Hou, Yuexin Wu, Xuezhi Wang, Hongkun Yu, and Jiawei Han. Large language models can self-improve. arXiv preprint arXiv:2210.11610, 2022." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.362, + 0.827, + 0.404 + ], + "angle": 0, + "content": "[7] Ilia Shumailov, Zakhar Shumaylov, Yiren Zhao, Nicolas Papernot, Ross Anderson, and Yarin Gal. Ai models collapse when trained on recursively generated data. Nature, 631(8022):755-759, 2024." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.408, + 0.827, + 0.45 + ], + "angle": 0, + "content": "[8] Fangkai Jiao, Geyang Guo, Xingxing Zhang, Nancy F Chen, Shafiq Joty, and Furu Wei. Preference optimization for reasoning with pseudo feedback. arXiv preprint arXiv:2411.16345, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.453, + 0.827, + 0.483 + ], + "angle": 0, + "content": "[9] Wei Xiong, Hanning Zhang, Chenlu Ye, Lichang Chen, Nan Jiang, and Tong Zhang. Selfrewarding correction for mathematical reasoning. arXiv preprint arXiv:2502.19613, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.486, + 0.825, + 0.515 + ], + "angle": 0, + "content": "[10] John Schulman, Filip Wolski, Prafulla Dhariwal, Alec Radford, and Oleg Klimov. Proximal policy optimization algorithms. arXiv preprint arXiv:1707.06347, 2017." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.518, + 0.827, + 0.561 + ], + "angle": 0, + "content": "[11] Zhihong Shao, Peiyi Wang, Qihao Zhu, Runxin Xu, Junxiao Song, Xiao Bi, Haowei Zhang, Mingchuan Zhang, YK Li, Y Wu, et al. Deepseekmath: Pushing the limits of mathematical reasoning in open language models. arXiv preprint arXiv:2402.03300, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.564, + 0.827, + 0.607 + ], + "angle": 0, + "content": "[12] Daya Guo, Dejian Yang, Haowei Zhang, Junxiao Song, Ruoyu Zhang, Runxin Xu, Qihao Zhu, Shirong Ma, Peiyi Wang, Xiao Bi, et al. Deepseek-r1: Incentivizing reasoning capability in llms via reinforcement learning. arXiv preprint arXiv:2501.12948, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.609, + 0.825, + 0.652 + ], + "angle": 0, + "content": "[13] Ajay Patel, Markus Hofmarcher, Claudiu Leoveanu-Condrei, Marius-Constantin Dinu, Chris Callison-Burch, and Sepp Hochreiter. Large language models can self-improve at web agent tasks. arXiv preprint arXiv:2405.20309, 2024." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.655, + 0.827, + 0.696 + ], + "angle": 0, + "content": "[14] Siheng Li, Cheng Yang, Zesen Cheng, Lemao Liu, Mo Yu, Yujiu Yang, and Wai Lam. Large language models can self-improve in long-context reasoning. arXiv preprint arXiv:2411.08147, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.7, + 0.827, + 0.743 + ], + "angle": 0, + "content": "[15] Yuxin Zuo, Kaiyan Zhang, Shang Qu, Li Sheng, Xuekai Zhu, Biqing Qi, Youbang Sun, Ganqu Cui, Ning Ding, and Bowen Zhou. Trl: Test-time reinforcement learning. arXiv preprint arXiv:2504.16084, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.746, + 0.825, + 0.775 + ], + "angle": 0, + "content": "[16] Weizhe Yuan, Richard Yuanzhe Pang, Kyunghyun Cho, Xian Li, Sainbayar Sukhbaatar, Jing Xu, and Jason Weston. Self-rewarding language models, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.778, + 0.827, + 0.82 + ], + "angle": 0, + "content": "[17] Muning Wen, Cheng Deng, Jun Wang, Weinan Zhang, and Ying Wen. Entropy-regularized token-level policy optimization for large language models. arXiv e-prints, pages arXiv-2402, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.824, + 0.827, + 0.867 + ], + "angle": 0, + "content": "[18] Zhiheng Xi, Wenxiang Chen, Boyang Hong, Senjie Jin, Rui Zheng, Wei He, Yiwen Ding, Shichun Liu, Xin Guo, Junzhe Wang, et al. Training large language models for reasoning through reverse curriculum reinforcement learning. arXiv preprint arXiv:2402.05808, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.87, + 0.825, + 0.913 + ], + "angle": 0, + "content": "[19] Zhaoyang Wang, Weilei He, Zhiyuan Liang, Xuchao Zhang, Chetan Bansal, Ying Wei, Weitong Zhang, and Huaxiu Yao. CREAM: Consistency regularized self-rewarding language models. In The Thirteenth International Conference on Learning Representations, 2025." 
+ }, + { + "type": "list", + "bbox": [ + 0.174, + 0.115, + 0.827, + 0.913 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.936, + 0.509, + 0.948 + ], + "angle": 0, + "content": "10" + } + ], + [ + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.091, + 0.828, + 0.121 + ], + "angle": 0, + "content": "[20] Yves Grandvalet and Yoshua Bengio. Semi-supervised learning by entropy minimization. Advances in neural information processing systems, 17, 2004." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.124, + 0.826, + 0.154 + ], + "angle": 0, + "content": "[21] Dequan Wang, Evan Shelhamer, Shaoteng Liu, Bruno Olshausen, and Trevor Darrell. Tent: Fully test-time adaptation by entropy minimization. arXiv preprint arXiv:2006.10726, 2020." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.157, + 0.825, + 0.186 + ], + "angle": 0, + "content": "[22] Qingyang Zhang, Yatao Bian, Xinke Kong, Peilin Zhao, and Changqing Zhang. Come: Test-time adaption by conservatively minimizing entropy. arXiv preprint arXiv:2410.10894, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.189, + 0.827, + 0.23 + ], + "angle": 0, + "content": "[23] Lorenz Kuhn, Yarin Gal, and Sebastian Farquhar. Semantic uncertainty: Linguistic invariances for uncertainty estimation in natural language generation. arXiv preprint arXiv:2302.09664, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.235, + 0.825, + 0.264 + ], + "angle": 0, + "content": "[24] Sebastian Farquhar, Jannik Kossen, Lorenz Kuhn, and Yarin Gal. Detecting hallucinations in large language models using semantic entropy. Nature, 630(8017):625-630, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.268, + 0.825, + 0.31 + ], + "angle": 0, + "content": "[25] Eugene Kharitonov, Rahma Chaabouni, Diane Bouchacourt, and Marco Baroni. Entropy minimization in emergent languages. In International Conference on Machine Learning, pages 5220-5230. PMLR, 2020." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.314, + 0.825, + 0.344 + ], + "angle": 0, + "content": "[26] Ori Press, Ravid Shwartz-Ziv, Yann LeCun, and Matthias Bethge. The entropy enigma: Success and failure of entropy minimization. arXiv preprint arXiv:2405.05012, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.347, + 0.827, + 0.403 + ], + "angle": 0, + "content": "[27] Soren Mindermann, Jan M Brauner, Muhammed T Razzak, Mrinank Sharma, Andreas Kirsch, Winnie Xu, Benedikt Holgen, Aidan N Gomez, Adrien Morisot, Sebastian Farquhar, et al. Prioritized training on points that are learnable, worth learning, and not yet learnt. In International Conference on Machine Learning, pages 15630-15649. PMLR, 2022." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.407, + 0.827, + 0.45 + ], + "angle": 0, + "content": "[28] Qiying Yu, Zheng Zhang, Ruofei Zhu, Yufeng Yuan, Xiaochen Zuo, Yu Yue, Tiantian Fan, Gaohong Liu, Lingjun Liu, Xin Liu, et al. Dapo: An open-source llm reinforcement learning system at scale. arXiv preprint arXiv:2503.14476, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.453, + 0.749, + 0.469 + ], + "angle": 0, + "content": "[29] Hugging Face. Open r1: A fully open reproduction of deepseek-r1, January 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.472, + 0.825, + 0.501 + ], + "angle": 0, + "content": "[30] Hanning Zhang, Jiarui Yao, Chenlu Ye, Wei Xiong, and Tong Zhang. Online-dpo-r1: Unlocking effective reasoning without the ppo overhead, 2025. Notion Blog." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.505, + 0.827, + 0.587 + ], + "angle": 0, + "content": "[31] Jia LI, Edward Beeching, Lewis Tunstall, Ben Lipkin, Roman Soletskyi, Shengyi Costa Huang, Kashif Rasul, Longhui Yu, Albert Jiang, Ziju Shen, Zihan Qin, Bin Dong, Li Zhou, Yann Fleureau, Guillaume Lample, and Stanislas Polu. Numinamath. 
[https://huggingface.co/AI-MO/NuminaMath-CoT](https://github.com/project-numina/aimo-progress-prize/blob/main/report/numina_dataset.pdf), 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.592, + 0.827, + 0.635 + ], + "angle": 0, + "content": "[32] Weizhe Yuan, Jane Yu, Song Jiang, Karthik Padthe, Yang Li, Dong Wang, Ilia Kulikov, Kyunghyun Cho, Yuandong Tian, Jason E Weston, et al. Naturalreasoning: Reasoning in the wild with \\(2.8\\mathrm{m}\\) challenging questions. arXiv preprint arXiv:2502.13124, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.638, + 0.825, + 0.681 + ], + "angle": 0, + "content": "[33] Xingyu Chen, Jiahao Xu, Tian Liang, Zhiwei He, Jianhui Pang, Dian Yu, Linfeng Song, Qiuzhi Liu, Mengfei Zhou, Zhuosheng Zhang, et al. Do not think that much for \\(2 + 3 = ?\\) on the overthinking of o1-like llms. arXiv preprint arXiv:2412.21187, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.685, + 0.825, + 0.713 + ], + "angle": 0, + "content": "[34] Huan Ma, Jingdong Chen, Guangyu Wang, and Changqing Zhang. Estimating llm uncertainty with logits. arXiv preprint arXiv:2502.00290, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.717, + 0.825, + 0.761 + ], + "angle": 0, + "content": "[35] Weihao Zeng, Yuzhen Huang, Wei Liu, Keqing He, Qian Liu, Zejun Ma, and Junxian He. 7b model and 8k examples: Emerging reasoning with reinforcement learning is both effective and efficient. https://hkust-nlp.notion.site/simplerl-reason, 2025. Notion Blog." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.764, + 0.827, + 0.82 + ], + "angle": 0, + "content": "[36] Yubo Wang, Xueguang Ma, Ge Zhang, Yuansheng Ni, Abhranil Chandra, Shiguang Guo, Weiming Ren, Aaran Arulraj, Xuan He, Ziyan Jiang, et al. Mmlu-pro: A more robust and challenging multi-task language understanding benchmark. In The Thirty-eight Conference on Neural Information Processing Systems Datasets and Benchmarks Track, 2024." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.824, + 0.825, + 0.866 + ], + "angle": 0, + "content": "[37] David Rein, Betty Li Hou, Asa Cooper Stickland, Jackson Petty, Richard Yuanzhe Pang, Julien Dirani, Julian Michael, and Samuel R Bowman. Gpqa: A graduate-level google-proof q&a benchmark. In First Conference on Language Modeling, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.87, + 0.825, + 0.913 + ], + "angle": 0, + "content": "[38] Nathan Lambert, Jacob Morrison, Valentina Pyatkin, Shengyi Huang, Hamish Ivison, Faeze Brahman, Lester James V. Miranda, Alisa Liu, Nouha Dziri, Shane Lyu, Yuling Gu, Saumya Malik, Victoria Graf, Jena D. Hwang, Jiangjiang Yang, Ronan Le Bras, Oyvind Tafjord, Chris" + }, + { + "type": "list", + "bbox": [ + 0.174, + 0.091, + 0.828, + 0.913 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.936, + 0.508, + 0.948 + ], + "angle": 0, + "content": "11" + } + ], + [ + { + "type": "ref_text", + "bbox": [ + 0.208, + 0.091, + 0.826, + 0.121 + ], + "angle": 0, + "content": "Wilhelm, Luca Soldaini, Noah A. Smith, Yizhong Wang, Pradeep Dasigi, and Hannaneh Hajishirzi. Tulu 3: Pushing frontiers in open language model post-training. 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.176, + 0.124, + 0.827, + 0.167 + ], + "angle": 0, + "content": "[39] Leandro von Werra, Younes Belkada, Lewis Tunstall, Edward Beeching, Tristan Thrush, Nathan Lambert, Shengyi Huang, Kashif Rasul, and Quentin Gallouédec. Trl: Transformer reinforcement learning. https://github.com/huggingface/trl, 2020." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.171, + 0.826, + 0.213 + ], + "angle": 0, + "content": "[40] Jingcheng Hu, Yinmin Zhang, Qi Han, Daxin Jiang, and Heung-Yeung Shum Xiangyu Zhang. Open-reasoner-zero: An open source approach to scaling reinforcement learning on the base model. https://github.com/Open-Reasoner-Zero/Open-Reasoner-Zero, 2025." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.217, + 0.825, + 0.259 + ], + "angle": 0, + "content": "[41] Yang Yue, Zhiqi Chen, Rui Lu, Andrew Zhao, Zhaokai Wang, Shiji Song, and Gao Huang. Does reinforcement learning really incentivize reasoning capacity in llms beyond the base model? arXiv preprint arXiv:2504.13837, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.263, + 0.827, + 0.305 + ], + "angle": 0, + "content": "[42] Yuda Song, Hanlin Zhang, Carson Eisenach, Sham Kakade, Dean Foster, and Udaya Ghai. Mind the gap: Examining the self-improvement capabilities of large language models. arXiv preprint arXiv:2412.02674, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.31, + 0.825, + 0.352 + ], + "angle": 0, + "content": "[43] Zhengxuan Wu, Aryaman Arora, Zheng Wang, Atticus Geiger, Dan Jurafsky, Christopher D Manning, and Christopher Potts. Reft: Representation finetuning for language models. Advances in Neural Information Processing Systems, 37:63908-63962, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.356, + 0.695, + 0.372 + ], + "angle": 0, + "content": "[44] George Casella and Roger Berger. Statistical inference. CRC press, 2024." 
+ }, + { + "type": "list", + "bbox": [ + 0.174, + 0.091, + 0.827, + 0.372 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.936, + 0.509, + 0.948 + ], + "angle": 0, + "content": "12" + } + ], + [ + { + "type": "title", + "bbox": [ + 0.173, + 0.09, + 0.275, + 0.109 + ], + "angle": 0, + "content": "Appendices" + }, + { + "type": "text", + "bbox": [ + 0.174, + 0.125, + 0.826, + 0.141 + ], + "angle": 0, + "content": "A Prompt Templates 13" + }, + { + "type": "text", + "bbox": [ + 0.174, + 0.159, + 0.825, + 0.173 + ], + "angle": 0, + "content": "B Case Study 15" + }, + { + "type": "text", + "bbox": [ + 0.174, + 0.192, + 0.825, + 0.208 + ], + "angle": 0, + "content": "C Implementation Details about Semantic Clustering 16" + }, + { + "type": "text", + "bbox": [ + 0.174, + 0.226, + 0.825, + 0.24 + ], + "angle": 0, + "content": "D Additional Results on Llama3 Model Series 16" + }, + { + "type": "text", + "bbox": [ + 0.174, + 0.259, + 0.825, + 0.274 + ], + "angle": 0, + "content": "E Additional Training Details 18" + }, + { + "type": "text", + "bbox": [ + 0.174, + 0.293, + 0.825, + 0.308 + ], + "angle": 0, + "content": "F Computational Cost of Semantic Clustering 18" + }, + { + "type": "text", + "bbox": [ + 0.174, + 0.326, + 0.825, + 0.341 + ], + "angle": 0, + "content": "G Details of Prompt Collection 19" + }, + { + "type": "text", + "bbox": [ + 0.174, + 0.36, + 0.825, + 0.374 + ], + "angle": 0, + "content": "H Additional Result about Pass@k 19" + }, + { + "type": "text", + "bbox": [ + 0.174, + 0.393, + 0.825, + 0.408 + ], + "angle": 0, + "content": "I The Influence of Clustering Quality on the Performance of EMPO 19" + }, + { + "type": "list", + "bbox": [ + 0.174, + 0.125, + 0.826, + 0.408 + ], + "angle": 0, + "content": null + }, + { + "type": "title", + "bbox": [ + 0.173, + 0.428, + 0.365, + 0.446 + ], + "angle": 0, + "content": "A Prompt Templates" + }, + { + "type": "text", + "bbox": [ + 0.172, + 0.459, + 0.607, + 0.475 + 
], + "angle": 0, + "content": "We provide the prompt templates used for training and evaluation." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.48, + 0.827, + 0.565 + ], + "angle": 0, + "content": "For mathematical reasoning tasks, we adopt the following reasoning prompt template similar to Online-DPO-R1 project [30] for both training and testing. During testing, we found that by adding system prompt, the accuracy of Qwen2.5-Math Base model can be better on mathematical benchmarks. However, system prompt would not help in natural reasoning tasks. Thus we use the same test prompt (start with system prompt) for both Base model and finetuned models in mathematical tasks. In natural reasoning tasks, we do not add system prompt for Base models." + }, + { + "type": "title", + "bbox": [ + 0.189, + 0.574, + 0.61, + 0.589 + ], + "angle": 0, + "content": "Mathematical Reasoning Training and Evaluation Template" + }, + { + "type": "code", + "bbox": [ + 0.187, + 0.599, + 0.713, + 0.695 + ], + "angle": 0, + "content": "system \nPlease reason step by step, and output your final answer within \\boxed{}}. \n \nuser \n{Question} Let's think step by step and output the final answer within \\boxed{}}. \n \nassistant" + }, + { + "type": "text", + "bbox": [ + 0.172, + 0.709, + 0.825, + 0.739 + ], + "angle": 0, + "content": "To train models with our EMPO for free-form natural reasoning tasks, we adopt the following reasoning prompt template similar to that we used in mathematical tasks for training." + }, + { + "type": "title", + "bbox": [ + 0.189, + 0.748, + 0.532, + 0.763 + ], + "angle": 0, + "content": "Free-form Natural Reasoning Training Template" + }, + { + "type": "code", + "bbox": [ + 0.187, + 0.773, + 0.691, + 0.87 + ], + "angle": 0, + "content": "system \nReason step by step, and output your final answer within \\boxed{}?. \n \nuser \n{Question} Reason step by step and output the final answer within \\boxed{}?. 
\n \nassistant" + }, + { + "type": "text", + "bbox": [ + 0.172, + 0.883, + 0.825, + 0.914 + ], + "angle": 0, + "content": "Since the MMLU-Pro and GPQA are both close-formed multi-choice benchmark. To evaluate the natural reasoning capability of the models, we use the following prompt template during testing." + }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.936, + 0.508, + 0.948 + ], + "angle": 0, + "content": "13" + } + ], + [ + { + "type": "title", + "bbox": [ + 0.19, + 0.093, + 0.493, + 0.108 + ], + "angle": 0, + "content": "MMLU Pro Test Template for Base Models" + }, + { + "type": "text", + "bbox": [ + 0.188, + 0.117, + 0.809, + 0.145 + ], + "angle": 0, + "content": "Question: {Question} Reason step by step and output the final answer (the correct letter choice from A-P) within \\boxed{}}." + }, + { + "type": "text", + "bbox": [ + 0.189, + 0.146, + 0.247, + 0.158 + ], + "angle": 0, + "content": "Answer:" + }, + { + "type": "title", + "bbox": [ + 0.189, + 0.209, + 0.446, + 0.224 + ], + "angle": 0, + "content": "Few Shot MMLU Pro Test Template" + }, + { + "type": "text", + "bbox": [ + 0.188, + 0.233, + 0.809, + 0.262 + ], + "angle": 0, + "content": "Question: {Question in Demonstration 1} Reason step by step and output the final answer (the correct letter choice from A-P) within \\boxed{}" + }, + { + "type": "text", + "bbox": [ + 0.188, + 0.262, + 0.809, + 0.289 + ], + "angle": 0, + "content": "Answer: Let's reason step by step. CoT of Demonstration 1 Therefore, the correct answer is Answer of Demonstration 1." + }, + { + "type": "text", + "bbox": [ + 0.189, + 0.298, + 0.475, + 0.318 + ], + "angle": 0, + "content": "(Omit more demonstrations for readability)" + }, + { + "type": "text", + "bbox": [ + 0.189, + 0.325, + 0.809, + 0.36 + ], + "angle": 0, + "content": "... Question: {Question} Reason step by step and output the final answer (the correct letter choice from A-P) within \\boxed{}}." 
+ }, + { + "type": "text", + "bbox": [ + 0.189, + 0.36, + 0.247, + 0.371 + ], + "angle": 0, + "content": "Answer:" + }, + { + "type": "title", + "bbox": [ + 0.189, + 0.422, + 0.632, + 0.437 + ], + "angle": 0, + "content": "MMLU Pro Test Template for Finetuned Models (SFT and RL)" + }, + { + "type": "text", + "bbox": [ + 0.188, + 0.447, + 0.809, + 0.489 + ], + "angle": 0, + "content": "system \nReason step by step, and output your final answer (the correct letter choice from A-P) within \n\\boxed{}" + }, + { + "type": "text", + "bbox": [ + 0.188, + 0.489, + 0.295, + 0.502 + ], + "angle": 0, + "content": "" + }, + { + "type": "text", + "bbox": [ + 0.188, + 0.503, + 0.297, + 0.515 + ], + "angle": 0, + "content": "user" + }, + { + "type": "text", + "bbox": [ + 0.188, + 0.516, + 0.809, + 0.544 + ], + "angle": 0, + "content": "{Question} Reason step by step and output the final answer (the correct letter choice from A-P) within \\boxed{}" + }, + { + "type": "text", + "bbox": [ + 0.188, + 0.544, + 0.264, + 0.557 + ], + "angle": 0, + "content": "" + }, + { + "type": "text", + "bbox": [ + 0.188, + 0.558, + 0.325, + 0.571 + ], + "angle": 0, + "content": "assistant" + }, + { + "type": "title", + "bbox": [ + 0.189, + 0.623, + 0.446, + 0.637 + ], + "angle": 0, + "content": "GPQA Test Prompt for Base Models" + }, + { + "type": "text", + "bbox": [ + 0.188, + 0.647, + 0.809, + 0.675 + ], + "angle": 0, + "content": "Question: {Question} Reason step by step and output the final answer (A, B, C, or D) within \\boxed{}" + }, + { + "type": "text", + "bbox": [ + 0.189, + 0.676, + 0.247, + 0.688 + ], + "angle": 0, + "content": "Answer:" + }, + { + "type": "title", + "bbox": [ + 0.189, + 0.739, + 0.514, + 0.754 + ], + "angle": 0, + "content": "Few Shot GPQA Test Prompt for Base Models" + }, + { + "type": "text", + "bbox": [ + 0.188, + 0.763, + 0.81, + 0.792 + ], + "angle": 0, + "content": "Question: {Question in Demonstration 1} Reason step by step and output the final answer (A, B, C, or 
D) within \\boxed{}" + }, + { + "type": "text", + "bbox": [ + 0.188, + 0.792, + 0.809, + 0.819 + ], + "angle": 0, + "content": "Answer: Let's reason step by step. CoT in Demonstration 1 Therefore, the correct final answer is Answer in Demonstration 1." + }, + { + "type": "text", + "bbox": [ + 0.189, + 0.826, + 0.468, + 0.848 + ], + "angle": 0, + "content": "(Omit more demonstration for readability)" + }, + { + "type": "text", + "bbox": [ + 0.189, + 0.855, + 0.809, + 0.888 + ], + "angle": 0, + "content": "... Question: {Question} Reason step by step and output the final answer (A, B, C, or D) within \\boxed{}" + }, + { + "type": "text", + "bbox": [ + 0.189, + 0.889, + 0.247, + 0.901 + ], + "angle": 0, + "content": "Answer:" + }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.936, + 0.509, + 0.948 + ], + "angle": 0, + "content": "14" + } + ], + [ + { + "type": "title", + "bbox": [ + 0.189, + 0.093, + 0.586, + 0.108 + ], + "angle": 0, + "content": "GPQA Test Prompt for Finetuned Models (SFT and RL)" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.118, + 0.314, + 0.132 + ], + "angle": 0, + "content": "system" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.132, + 0.732, + 0.146 + ], + "angle": 0, + "content": "Reason step by step, and output your final answer (A, B, C, or D) within \\boxed{}" + }, + { + "type": "text", + "bbox": [ + 0.188, + 0.146, + 0.264, + 0.159 + ], + "angle": 0, + "content": "" + }, + { + "type": "text", + "bbox": [ + 0.188, + 0.161, + 0.297, + 0.173 + ], + "angle": 0, + "content": "user" + }, + { + "type": "text", + "bbox": [ + 0.188, + 0.173, + 0.793, + 0.187 + ], + "angle": 0, + "content": "{Question} Reason step by step and output the final answer (A, B, C, or D) within \\boxed{}" + }, + { + "type": "text", + "bbox": [ + 0.188, + 0.188, + 0.265, + 0.2 + ], + "angle": 0, + "content": "" + }, + { + "type": "text", + "bbox": [ + 0.188, + 0.201, + 0.325, + 0.214 + ], + "angle": 0, + "content": "assistant" + }, + { + "type": "title", 
+ "bbox": [ + 0.173, + 0.245, + 0.304, + 0.263 + ], + "angle": 0, + "content": "B Case Study" + }, + { + "type": "title", + "bbox": [ + 0.189, + 0.282, + 0.619, + 0.297 + ], + "angle": 0, + "content": "Qwen2.5-Math-7B's Answer to the 18-th Question in AMC23" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.306, + 0.81, + 0.405 + ], + "angle": 0, + "content": "Question: Janet rolls a standard 6-sided die 4 times and keeps a running total of the numbers she rolls. What is the probability that at some point, her running total will equal 3? The final answer can be written in the form \\(\\frac{m}{n}\\), where \\(m\\) and \\(n\\) are relatively prime positive integers. What is \\(m + n\\)? Response: To solve this problem, we need to determine the probability that at some point during 4 rolls of a standard 6-sided die, the running total will equal 3. We can use dynamic programming to keep track of the probabilities of each possible running total after each roll. Here's the plan:" + }, + { + "type": "text", + "bbox": [ + 0.188, + 0.405, + 0.801, + 0.418 + ], + "angle": 0, + "content": "1. Initialize a list to keep track of the probability of each possible running total after each roll." + }, + { + "type": "text", + "bbox": [ + 0.188, + 0.418, + 0.79, + 0.431 + ], + "angle": 0, + "content": "2. Start with the initial condition that the probability of a running total of 0 after 0 rolls is 1." + }, + { + "type": "text", + "bbox": [ + 0.188, + 0.432, + 0.807, + 0.459 + ], + "angle": 0, + "content": "3. For each roll, update the probabilities of each possible running total by considering the outcomes of rolling a 1, 2, 3, 4, 5, or 6." + }, + { + "type": "text", + "bbox": [ + 0.188, + 0.459, + 0.691, + 0.473 + ], + "angle": 0, + "content": "4. After each roll, check if the probability of a running total of 3 is non-zero." + }, + { + "type": "text", + "bbox": [ + 0.188, + 0.473, + 0.808, + 0.501 + ], + "angle": 0, + "content": "5. 
The final answer will be the probability of a running total of 3 after 4 rolls. Let's implement this in Python:" + }, + { + "type": "list", + "bbox": [ + 0.188, + 0.405, + 0.808, + 0.501 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.189, + 0.508, + 0.205, + 0.514 + ], + "angle": 0, + "content": "···" + }, + { + "type": "text", + "bbox": [ + 0.189, + 0.515, + 0.466, + 0.529 + ], + "angle": 0, + "content": "(We omit the python code for readability.)" + }, + { + "type": "text", + "bbox": [ + 0.189, + 0.535, + 0.205, + 0.541 + ], + "angle": 0, + "content": "···" + }, + { + "type": "text", + "bbox": [ + 0.188, + 0.542, + 0.808, + 0.583 + ], + "angle": 0, + "content": "It looks like there was an issue with the implementation. The probability of the running total being exactly 3 after 4 rolls should not be zero, so let's correct the approach and re-implement the solution. Here's the corrected plan:" + }, + { + "type": "text", + "bbox": [ + 0.189, + 0.584, + 0.801, + 0.597 + ], + "angle": 0, + "content": "1. Initialize a list to keep track of the probability of each possible running total after each roll." + }, + { + "type": "text", + "bbox": [ + 0.189, + 0.597, + 0.789, + 0.611 + ], + "angle": 0, + "content": "2. Start with the initial condition that the probability of a running total of 0 after 0 rolls is 1." + }, + { + "type": "text", + "bbox": [ + 0.188, + 0.611, + 0.808, + 0.638 + ], + "angle": 0, + "content": "3. For each roll, update the probabilities of each possible running total by considering the outcomes of rolling a 1, 2, 3, 4, 5, or 6." + }, + { + "type": "text", + "bbox": [ + 0.188, + 0.639, + 0.69, + 0.652 + ], + "angle": 0, + "content": "4. After each roll, check if the probability of a running total of 3 is non-zero." + }, + { + "type": "text", + "bbox": [ + 0.188, + 0.652, + 0.696, + 0.666 + ], + "angle": 0, + "content": "5. The final answer will be the probability of a running total of 3 after 4 rolls." 
+ }, + { + "type": "list", + "bbox": [ + 0.188, + 0.584, + 0.808, + 0.666 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.189, + 0.667, + 0.416, + 0.68 + ], + "angle": 0, + "content": "Let's re-implement this in Python:" + }, + { + "type": "text", + "bbox": [ + 0.189, + 0.689, + 0.205, + 0.695 + ], + "angle": 0, + "content": "···" + }, + { + "type": "text", + "bbox": [ + 0.188, + 0.694, + 0.466, + 0.708 + ], + "angle": 0, + "content": "(We omit the python code for readability.)" + }, + { + "type": "text", + "bbox": [ + 0.189, + 0.719, + 0.808, + 0.748 + ], + "angle": 0, + "content": "It seems there is still an issue with the implementation. Let's manually check the logic and correct it step by step." + }, + { + "type": "text", + "bbox": [ + 0.188, + 0.749, + 0.808, + 0.775 + ], + "angle": 0, + "content": "1. Initialize the probability list with the initial condition that the probability of a running total of 0 after 0 rolls is 1." + }, + { + "type": "text", + "bbox": [ + 0.188, + 0.776, + 0.808, + 0.803 + ], + "angle": 0, + "content": "2. For each roll, update the probabilities of each possible running total by considering the outcomes of rolling a 1, 2, 3, 4, 5, or 6." + }, + { + "type": "text", + "bbox": [ + 0.188, + 0.804, + 0.691, + 0.817 + ], + "angle": 0, + "content": "3. After each roll, check if the probability of a running total of 3 is non-zero." + }, + { + "type": "text", + "bbox": [ + 0.188, + 0.818, + 0.808, + 0.846 + ], + "angle": 0, + "content": "4. The final answer will be the probability of a running total of 3 after 4 rolls. 
Let's manually simulate the process: - After 0 rolls: Probability of 0 is 1" + }, + { + "type": "list", + "bbox": [ + 0.188, + 0.749, + 0.808, + 0.846 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.189, + 0.853, + 0.204, + 0.858 + ], + "angle": 0, + "content": "···" + }, + { + "type": "text", + "bbox": [ + 0.188, + 0.859, + 0.808, + 0.903 + ], + "angle": 0, + "content": "The probability of a running total of 3 after 4 rolls is \\( 4 / 1296 \\), which simplifies to \\( 1 / 324 \\). Therefore, the final answer in the form of \\( m / n \\) is \\( 1 / 324 \\), and the sum \\( m + n \\) is \\( 1 + 324 = 325 \\). The final answer is \\( \\boxed{325} \\)." + }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.936, + 0.508, + 0.948 + ], + "angle": 0, + "content": "15" + } + ], + [ + { + "type": "title", + "bbox": [ + 0.172, + 0.09, + 0.636, + 0.109 + ], + "angle": 0, + "content": "C Implementation Details about Semantic Clustering" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.119, + 0.828, + 0.177 + ], + "angle": 0, + "content": "We detail the implementation of semantic clustering in our EMP0. The semantic cluster process is shown as Algorithm 1. We also provide the simplified python code to show how we determine whether two model outputs are equivalent or not in mathematical reasoning (Algorithm 2) and free-form natural reasoning tasks (Algorithm 3)." + }, + { + "type": "code_caption", + "bbox": [ + 0.174, + 0.192, + 0.404, + 0.208 + ], + "angle": 0, + "content": "Algorithm 1: Semantic Clustering" + }, + { + "type": "code", + "bbox": [ + 0.159, + 0.21, + 0.709, + 0.436 + ], + "angle": 0, + "content": "Require : question \\(q\\) , a group set of model response \\(\\{o_2,\\dots,o_G\\}\\) , verifier \\(\\nu\\) Initialize: \\(C = \\{o_1\\}\\) \nfor \\(2\\leq i\\leq G\\) do \nfor \\(c\\in C\\) do // Random choose one element from \\(c\\) for comparison \\(o_c = c[0]\\) // Is the meaning of old sequence equivalent to new one? 
if \\(\\mathcal{V}(q,o_c,o_i) ==\\) True then // Put into existing class \\(c = c\\cup \\{o_i\\}\\) break \nend \nend \n// \\(o_i\\) is semantically distinct, belongs to a novel cluster. \n\\(C\\gets C\\cup \\{o_i\\}\\) \nend \nReturn :C" + }, + { + "type": "code_caption", + "bbox": [ + 0.173, + 0.458, + 0.665, + 0.473 + ], + "angle": 0, + "content": "Algorithm 2: Implementation of verifier for mathematical reasoning tasks." + }, + { + "type": "code", + "bbox": [ + 0.186, + 0.484, + 0.714, + 0.564 + ], + "angle": 0, + "content": "from math_VERIFY import parse, verify \ndef are_equivalent (model_output_1, model_output_2) prediction_1 \\(=\\) parse(model_output_1) prediction_2 \\(=\\) parse(model_output_2) return verify(prediction_1,prediction_2)" + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.592, + 0.575, + 0.608 + ], + "angle": 0, + "content": "D Additional Results on Llama3 Model Series" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.622, + 0.828, + 0.721 + ], + "angle": 0, + "content": "We conduct additional experiments to validate the efficacy of our EMP0 on other model series beyond Qwen2.5. The results are shown in Table 3. Consistent with other concurrent practice, we are unable to implement R1-Zero-like training on the Llama series, i.e., directly initializing RL process from the Base model without SFT). Thus, we instead consider a semi-supervised learning approach by initializing from instruct-tuned model and enhance the reasoning capability with our EMP0. As shown in Table 3, when initialize from Llama3.2-3B-Instruct model, our EMP0 can also substantially improve reasoning capability of instruct-tuned model which have undergone carefully-designed post-training." + }, + { + "type": "title", + "bbox": [ + 0.171, + 0.725, + 0.825, + 0.74 + ], + "angle": 0, + "content": "Why Qwen2.5 Base model can initialize fully unsupervised RL training, while Llama3 can not?" 
+ }, + { + "type": "text", + "bbox": [ + 0.171, + 0.746, + 0.828, + 0.913 + ], + "angle": 0, + "content": "Consistent with open-source community practices, we found that R1-Zero-like RL training can only be reproduced unsupervised on Qwen2.5 series Base models. In contrast, Llama3 series model still necessitate \"cold-start\", i.e., SFT, before RL. Specifically, in our experiments, the Qwen2.5 Base models demonstrated inherent answer consistency from the initial stages of EMPO training. However, Llama3 series Base models suffer severe inconsistency and fail to convergence during training. We hypothesize this divergence stems from Qwen2.5's pretraining strategy. As mentioned in the technical report [5], the pretrain data corpus are mixed with both web text and QA pairs generated by instruct-tuned Qwen2 models. This endows Qwen2.5 Base models with native instruction-following capabilities. Experimental evidence supports this hypothesis. As shown in Table 2, Qwen2.5 Base models successfully follow the instruction such as \"put the final answer (A-P) within box\" when answering multiple-choice questions from MMLU Pro and achieve an accuracy notably higher than random guess." + }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.936, + 0.51, + 0.948 + ], + "angle": 0, + "content": "16" + } + ], + [ + { + "type": "code_caption", + "bbox": [ + 0.173, + 0.185, + 0.623, + 0.2 + ], + "angle": 0, + "content": "Algorithm 3: Implementation of verifier for natural reasoning tasks." 
+ }, + { + "type": "code", + "bbox": [ + 0.182, + 0.208, + 0.862, + 0.808 + ], + "angle": 0, + "content": "```cpp\n{\n verifier = AutoModelForCausalLM.from_pretrained(...);\n tokenizer = AutoTokenizer.from_pretrained(...);\n}\ndef are_equivalent(model_output_1, model_output_2, question, verifier)\n prediction_1 = parse(model_output_1)\n prediction_2 = parse(model_output_2)\n prompt = (\n f\"User: ## Question: {question}\\n\\n\"\n f\"## Ground Truth Answer: {prediction_1}\\n\\n\"\n f\"## Student Answer: {prediction_2}\\n\\n\"\n \"For the above question, please verify if the student's answer is equivalent to the ground truth answer.\\n\"\n \"Do not solve the question by yourself; just check if the student's answer is equivalent to the ground truth answer.\\n\"\n \"If correct, output Final Decision: Yes\".\n \"If incorrect, output Final Decision: No\\..\\n\"\n \"Assistant: Final Decision: \"\n )\n inputs = selftokenizer(modified_prompt, return_tensors=\"pt\").to(self.model_device)\n input_ids = inputs-input_ids\n # inference for output logits\n with torch.inference_mode():\n outputs = self.model.forward(input_ids)\n logits = outputs.logits\n # get next output logits\n next_token_logits = logits[0, input_ids.shape[1] - 1, :]\n # get the token ID of \"Yes\" and \"No\"\n decision_tokens = selftokenizer(\"Yes\", \"No\")\n yes_id = decision_tokens.input_ids[0]\n no_id = decision_tokens.input_ids[1]\n # calculate probability\n probs = torch softmax(next_token_logits, dim=0)\n yes_prob = probs[yes_id].item()\n no_prob = probs[no_id].item()\n return yes_prob > no_prob" + }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.936, + 0.509, + 0.948 + ], + "angle": 0, + "content": "17" + } + ], + [ + { + "type": "table_caption", + "bbox": [ + 0.303, + 0.088, + 0.694, + 0.104 + ], + "angle": 0, + "content": "Table 3: Accuracy on mathematical reasoning benchmarks." + }, + { + "type": "table", + "bbox": [ + 0.174, + 0.109, + 0.83, + 0.283 + ], + "angle": 0, + "content": "
SupervisionMATHMinerva MathOMNIAIME24AMC23Avg.
frontier model
Llama-3.1-70B-Instruct{q,r,a}64.635.331.916.730.135.7
Eurus-2-7B-PRIME{q,r,a}79.238.642.126.757.848.9
1B model
Llama3.2-InstructNone27.25.15.60.010.09.6
Llama3.2-Instruct w/GRPO{q,a}29.83.76.40.012.510.5
Llama3.2-Instruct w/EMPO{q}31.05.17.93.37.511.0
3B model
Llama3.2-InstructNone46.219.115.33.320.020.8
Llama3.2-Instruct w/GRPO{q,a}49.222.417.613.332.527.0
Llama3.2-Instruct w/EMPO{q}49.820.218.413.330.026.3
" + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.314, + 0.441, + 0.332 + ], + "angle": 0, + "content": "E Additional Training Details" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.351, + 0.828, + 0.381 + ], + "angle": 0, + "content": "We provide a brief summary of our training recipes in Table 4. Besides, we have release the code in the supplementary materials which contained the full training configurations for re-implementation." + }, + { + "type": "table_caption", + "bbox": [ + 0.265, + 0.399, + 0.733, + 0.415 + ], + "angle": 0, + "content": "Table 4: A brief summary of training recipes of Qwen2.5 Base models." + }, + { + "type": "table", + "bbox": [ + 0.27, + 0.422, + 0.732, + 0.503 + ], + "angle": 0, + "content": "
1.5B-Math7B-Math3B7B14B
Number of generations77121212
Learning rate3e-73e-73e-73e-73e-7
Max completion length2048204810241024768
Batch size per GPU12111
" + }, + { + "type": "title", + "bbox": [ + 0.171, + 0.547, + 0.577, + 0.566 + ], + "angle": 0, + "content": "F Computational Cost of Semantic Clustering" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.584, + 0.828, + 0.751 + ], + "angle": 0, + "content": "Given the number of responses sampled per question \\( G \\) (i.e., the group size) and the training dataset size \\( N \\), the time complexity of the clustering process is \\( O(G^2 \\times N) \\). In mathematical reasoning tasks, semantic clustering is implemented by regular expressions which do not involve notable computational cost. For natural reasoning tasks, we rely on an additional compact small language model. To evaluate the additional computational overhead introduced by semantic clustering in EMPO, we conducted comparative analyses of EMPO and GRPO in terms of total training duration and GPU memory utilization. The results of mathematical reasoning and natural reasoning are shown in Table 6, respectively. It is worthy to note that the 14B model experiments require slightly less computational time than the 7B model. This is because, in our 14B experiments, we reduced the batch size and maximum response length from 2 and 1024 to 1 and 768, respectively, compared to the 3B and 7B configurations. This adjustment was made to fit the limited GPU memory of one single \\( 8 \\times \\mathrm{A}100 \\) 80G machine." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.769, + 0.828, + 0.813 + ], + "angle": 0, + "content": "Table 5: Comparison of total runtime (measured as \\(8 \\times\\) A100 GPU hours) and storage cost (measured by max total GPU memory (GiB) utilization) between GRPO and EMP0. The GPU Memory semantic cluster process requires minimal computation and storage." + }, + { + "type": "table", + "bbox": [ + 0.296, + 0.818, + 0.703, + 0.894 + ], + "angle": 0, + "content": "
Qwen2.5-1.5B-MathQwen2.5-7B-Math
GPU HoursGPU MemGPU HoursGPU Mem
GRPO11.2240.48.5501.3
EMPO11.7208.28.7532.7
" + }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.936, + 0.51, + 0.948 + ], + "angle": 0, + "content": "18" + } + ], + [ + { + "type": "table_caption", + "bbox": [ + 0.171, + 0.088, + 0.825, + 0.131 + ], + "angle": 0, + "content": "Table 6: Comparison of total runtime (measured as \\(8 \\times\\) A100 GPU hours) and storage cost (measured by total GPU memory (GiB) utilization) between GRPO and EMP0. The GPU Memory semantic cluster process requires minimal computation and storage." + }, + { + "type": "table", + "bbox": [ + 0.209, + 0.138, + 0.789, + 0.213 + ], + "angle": 0, + "content": "
Qwen2.5-3BQwen2.5-7BQwen2.5-14B
GPU HoursGPU MemGPU HoursGPU MemGPU HoursGPU Mem
GRPO9.5274.812.4508.611.0588.2
EMPO11.1286.914.6532.711.5541.1
" + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.235, + 0.45, + 0.253 + ], + "angle": 0, + "content": "G Details of Prompt Collection" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.266, + 0.828, + 0.337 + ], + "angle": 0, + "content": "For mathematical reasoning, we directly use 20,000 prompts randomly selected from Numina-Math-CoT. For free-form natural reasoning tasks, we adopt the prompts from Natural Reasoning5 by filtering out the questions with over-long prompt, reference answer. Besides, we use the response length of Llama3.3-70B-Instruct as a difficulty estimation metric, and filter out overly difficult samples with response lengths exceeding 4096 tokens. The data collection python code is demonstrated as follow:" + }, + { + "type": "code_caption", + "bbox": [ + 0.174, + 0.348, + 0.637, + 0.364 + ], + "angle": 0, + "content": "Algorithm 4: Python code of data filtering in a huggingface-like style." + }, + { + "type": "code", + "bbox": [ + 0.18, + 0.374, + 0.717, + 0.617 + ], + "angle": 0, + "content": "from datasets import load_dataset \ndataset = load_dataset(\"facebook/Natural-Reasoning\") \nfiltered_dataset = dataset.filter( lambda x: (\n # no answer\n len(x[\"reference_answer\"])) > 0\n # over-long answer\n and len(x[\"reference_answer\"]} < 129\n # overly difficult questions\n and len(x[\"llamaresponses\"]} < 4096\n # over-long prompt\n and len(x[\"question\"]} < 512\n # proof-oriented\n and (\"prove\" not in x[\"question\"].lower())\n and (\"proof\" not in x[\"question\"].lower())\n)" + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.647, + 0.482, + 0.663 + ], + "angle": 0, + "content": "H Additional Result about Pass@k" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.678, + 0.825, + 0.72 + ], + "angle": 0, + "content": "We provide additional visualization pass@k results of models trained with EMP0. The results are shown as follow. As shown in Figure H, the Base model consistently catch up with RL trained models when k is large." 
+ }, + { + "type": "title", + "bbox": [ + 0.172, + 0.739, + 0.755, + 0.757 + ], + "angle": 0, + "content": "I The Influence of Clustering Quality on the Performance of EMPO" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.77, + 0.827, + 0.828 + ], + "angle": 0, + "content": "In our mathematical reasoning experiments, semantic clustering is achieved solely through regular expression matching without introducing additional models. Due to the naturally structured response formats in mathematical tasks, regular expression could accurately determine answer equivalence, resulting in relatively high clustering quality." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.832, + 0.826, + 0.889 + ], + "angle": 0, + "content": "However, in more general free-form natural reasoning tasks where model responses are free-form much more diverse (e.g., matrix, numbers, a few lines of sentences/codes...), the clustering quality can impact EMPO's effectiveness. For instance, in our more early practice, we tried DeBERTa (a bert-like model with 300M parameters trained by microsoft) for semantic clustering. Due to" + }, + { + "type": "page_footnote", + "bbox": [ + 0.191, + 0.897, + 0.551, + 0.913 + ], + "angle": 0, + "content": "5https://huggingface.co/datasets/facebook/natural_reasoning" + }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.936, + 0.509, + 0.948 + ], + "angle": 0, + "content": "19" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.182, + 0.096, + 0.482, + 0.245 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.171, + 0.275, + 0.49, + 0.303 + ], + "angle": 0, + "content": "Figure 5: Trend of pass@k accuracy on Math test-set." + }, + { + "type": "image", + "bbox": [ + 0.518, + 0.096, + 0.817, + 0.245 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.508, + 0.275, + 0.825, + 0.303 + ], + "angle": 0, + "content": "Figure 6: Trend of pass@k accuracy on OMNI test-set." 
+ }, + { + "type": "text", + "bbox": [ + 0.17, + 0.326, + 0.827, + 0.425 + ], + "angle": 0, + "content": "the poor quality of semantic clustering, our EMPO straggled to scale up and suffered from frequent reward hacking. Subsequently, by leveraging the general-verifier released by Tiger-Lab (a fine-tuned Qwen2.5-1.5B-Math model) for clustering, we successfully generalized EMPO to more general free-form reasoning tasks. Noted that even though this small language model undergoes supervised finetuning, it serves within our fully unsupervised framework as a fixed utility function for semantic comparison, rather than serving as an external supervisor for task-specific feedback. There are several fundamental difference between cluster model and the reward model used in supervised RL:" + }, + { + "type": "text", + "bbox": [ + 0.217, + 0.435, + 0.825, + 0.49 + ], + "angle": 0, + "content": "- The cluster model does not evaluate output correctness relative to input queries. It just provides pairwise comparisons between the model's own outputs. That is, it only provides binary answer about \"whether these two answers are the same?\" rather than \"which answer is better?\"." + }, + { + "type": "text", + "bbox": [ + 0.217, + 0.495, + 0.825, + 0.524 + ], + "angle": 0, + "content": "- The cluster model does not provide any guidance, such as gradient information or hints on how to refine the reasoning traces." + }, + { + "type": "text", + "bbox": [ + 0.217, + 0.528, + 0.825, + 0.584 + ], + "angle": 0, + "content": "- Compared to reward model or human-verifier golden answers, it can be much easier to implement such a cluster model. For example, in mathematical reasoning tasks, only regular expressions are enough for clustering. In natural reasoning tasks, a finetuned Qwen2.5-1B model can provide high quality semantic cluster results." 
+ }, + { + "type": "list", + "bbox": [ + 0.217, + 0.435, + 0.825, + 0.584 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.594, + 0.827, + 0.651 + ], + "angle": 0, + "content": "Essentially, this is related to the non-identifiability problem in statistical inference [44]. The issue of non-identifiability arises because multiple, distinct underlying states (potential \"truths,\" or more accurately, different reasoning pathways or different clusters of incorrect answers) could produce the same pattern of relational signals (i.e., the same semantic clustering results)." + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.936, + 0.51, + 0.948 + ], + "angle": 0, + "content": "20" + } + ] +] \ No newline at end of file diff --git a/data/2025/2504_05xxx/2504.05812/2ebd62c4-e647-47e7-bb58-1c94267578a3_origin.pdf b/data/2025/2504_05xxx/2504.05812/2ebd62c4-e647-47e7-bb58-1c94267578a3_origin.pdf new file mode 100644 index 0000000000000000000000000000000000000000..79159f3900f741071a562f0c1422c6fd76273665 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05812/2ebd62c4-e647-47e7-bb58-1c94267578a3_origin.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4618d46a0f5161a05e9e00f291fc32785c9208893105dfde100f409bc0f75acd +size 5443218 diff --git a/data/2025/2504_05xxx/2504.05812/full.md b/data/2025/2504_05xxx/2504.05812/full.md new file mode 100644 index 0000000000000000000000000000000000000000..bbfb74740cb4da45efb5767f5b32935dfa3b3955 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05812/full.md @@ -0,0 +1,552 @@ +# Right Question is Already Half the Answer: Fully Unsupervised LLM Reasoning Incentivization + +Qingyang Zhang + +Tianjin University + +Haitao Wu + +Tianjin University + +Changqing Zhang + +Tianjin University + +Peilin Zhao + +Tencent AI Lab + +Yatao Bian + +Tencent AI Lab & NUS + +# Abstract + +Existing methods to enhance the reasoning capability of large language models predominantly rely on supervised 
fine-tuning (SFT) followed by reinforcement learning (RL) on reasoning-specific data. These approaches critically depend on external supervisions—such as labeled reasoning traces, verified golden answers, or pre-trained reward models. In this work, we propose Entropy Minimized Policy Optimization (EMPO), which makes an early attempt at fully unsupervised LLM reasoning incentivization. By continuously minimizing the predictive entropy of LLMs on unlabeled questions in a latent semantic space, EMP0 achieves competitive performance compared to supervised counterparts on both mathematical and freeform natural reasoning tasks. Specifically, without any supervised signals, EMP0 boosts the accuracy of Qwen2.5-Math-7B Base from $30.7\%$ to $48.1\%$ on mathematical benchmarks and improves the accuracy of Qwen2.5-7B Base from $32.1\%$ to $50.1\%$ on MMLU-Pro. Primary experiments and analysis are also provided to interpret the effectiveness of EMP0. Code is available at this url. + +# 1 Introduction + +Large language models (LLMs) have demonstrated exceptional potential in challenging tasks such as mathematical reasoning [1, 2, 3] and code generation [4]. A prevailing paradigm for training reasoning LLMs involves firstly performing supervised fine-tuning (SFT) and then reinforcement learning (RL), or iterative combinations of both, applied to reasoning-specific datasets after pretraining [5]. Unfortunately, these methods typically depend on large-scale reasoning datasets with various forms of supervised information, such as human-labeled reasoning traces, verified golden answers, or an additional pre-trained re + +ward model. As a consequence, endowing LLMs with powerful reasoning capability through human experts is becoming increasingly time-consuming and costly, which greatly limits the scalability and broader adoption of reasoning models. 
+ +To mitigate this, previous work employs self-consistency to construct pseudo data and deploy supervised finetuning for better performance [6]. However, the performance improvement is limited and under risks of model collapse [7]. Recent advancements, such as the pioneering work PFPO [8], frame the labeling of solutions as evaluation against test cases and then leverage self-consistency + +![](images/02778710b12d49b556848c17b7b983ca69c0dffbf0dd66e3238c493a9a9401fe.jpg) +Figure 1: Improvement of the proposed method on Qwen2.5-7B and Qwen2.5-7B-Math model. + +to generate pseudo test cases. Despite the promising results, the proposed method still necessitates supervision from instruction finetuning data and supervision signals from the frontier LLMs to initialize the RL process. Another more recent work [9] introduces a two-stage framework to construct self-rewarding reasoning models using self-generated data followed by RL. Despite the superior performance, the proposed method relies on a ground-truth verifier to obtain self-correction reasoning traces by rejection sampling. These approaches inspire our exploration of a critical open question: How can we incentivize LLM reasoning capacities in a fully unsupervised manner? + +![](images/500ae90fc55e78382e716a769af6e3fc959b41fea3452ae5163b8e88a0534184.jpg) +(a) Comparison of different RL methods + +![](images/60bb097db42d16f1849c828d22d75124301e0a1142c86a96f7d7e2b0c7a98f8f.jpg) +(b) Overview of EMPO +Figure 2: Overview of the proposed method. (a) Previous method like PPO [10] or GRPO [11] typically relies on external supervised signals, e.g., a pretrained reward model or golden answers. (b) The proposed Entropy Minimized Policy Optimization (EMPO) samples a set of responses from the current policy model, and then builds semantic clusters according to their equivalence. 
By continuously minimizing the entropy at a meaning level, our method achieves competitive benchmark performance without any external supervision, i.e., rule-based reward, pre-defined test cases or a pre-trained reward model.
+
+Recent advanced DeepSeek-R1-Zero [12] demonstrates robust reasoning capabilities without dependency on SFT data. By directly initiating RL from the base model, DeepSeek-R1-Zero autonomously evolves sophisticated reasoning behaviors such as reflection and self-criticism by exploring the reward signals provided by rule-based rewards, i.e., verified golden answers or an additional pre-trained reward model. Inspired by the success of DeepSeek-R1-Zero, our motivation is to devise a fully unsupervised approach for powerful reasoning capability. Specifically, we propose a novel reinforcement learning algorithm termed Entropy Minimized Policy Optimization (EMPO), which incentivizes the reasoning capability of LLMs in a fully unsupervised manner by minimizing their predictive entropy in a latent semantic space. This method optimizes the model to favor reasoning traces yielding consistent answers, enhancing output reliability. The semantic entropy objective we propose to minimize is a well-established measurement of LLMs' uncertainty, which extends beyond mathematical reasoning to free-form question-answering tasks. We further introduce entropy thresholding to filter unreliable reasoning traces, stabilizing the unsupervised training process. Experiments on various tasks including mathematical reasoning and free-form natural reasoning are conducted to validate the proposed method. Our contributions are summarized as follows:
+
+- We propose an effective and principled strategy called Entropy-Minimized Policy Optimization (EMPO) for incentivizing the reasoning capabilities of LLMs in a fully unsupervised manner.
+- We establish semantic entropy as a potent intrinsic reward signal for guiding LLM reasoning.
Our analysis confirms a strong negative correlation between semantic entropy and model + +accuracy, validating its efficacy as a robust, unsupervised optimization objective that drives models towards generating more consistent and reliable outputs. + +- Experiments on both math reasoning tasks with deterministic golden answers and freeform natural reasoning tasks are conducted to validate the efficacy and versatility of EMP0. Additionally, we provide critical insights into EMP0's mechanism, demonstrating that its effectiveness stems from an enhanced ability to consistently select and prioritize strong, pre-existing reasoning pathways learned during pre-training, rather than instilling fundamentally new reasoning skills. This underscores EMP0's strength in efficiently eliciting and refining latent capabilities within base models. + +# 2 Related Work + +Self-Supervised and Semi-Supervised Reasoning. To address the dependency on labeled data, several self-supervised and unsupervised methods have emerged. Huang et al. [6] propose a self-improvement framework where LLMs generate high-confidence answers using Chain-of-Thought (CoT) prompting and self-consistency, subsequently fine-tuning on these pseudo-labels. However, the performance gains are often limited, and there is a risk of model collapse, as noted in [7]. Recently, Patel et al. [13] apply self-improvement to web navigation tasks in WebArena, fine-tuning on synthetic data generated by the model itself. Li et al. [14] enhance long-context reasoning via SeaLong, sampling multiple outputs and optimizing with Minimum Bayes Risk. These methods, while reducing reliance on external labels, still involve supervised fine-tuning steps, contrasting with EMPO's fully unsupervised RL approach. 
A concurrent work, i.e., test-time reinforcement learning (TTRL) [15] directly obtains pseudo label by major voting and then conducts RL on test prompts at inference time, whereas our EMPO strictly maintains the separation between training and testing phases for ensuring that the model remains unexposed to any test prompts during training. Furthermore, while TTRL is currently limited to mathematical tasks, our approach is applicable to more general free-form reasoning tasks. + +Self-Rewarding and RL-based Reasoning. RL has become a prominent technique for enhancing LLM reasoning, often leveraging external or self-generated rewards. Yuan et al. [16] propose using the LLM itself via LLM-as-a-Judge prompting to provide rewards during training, reducing reliance on human feedback. Similarly, Xiong et al. [9] propose a two-stage self-rewarding framework for mathematical reasoning, generating data and applying RL with a ground-truth verifier for self-correction, achieving superior performance but requiring supervised signals. Jiao et al. [8] frame solution labeling as evaluation against test cases, yet still rely on instruction fine-tuning and frontier LLM signals for RL initialization. Wen et al. [17] introduce Entropy-Regularized Token-Level Policy Optimization (ETPO), augmenting RL with an entropy bonus to promote exploration, differing from EMP0's entropy minimization focus. Guo et al. [12] with DeepSeek-R1 demonstrate robust reasoning via RL from a base model, using rule-based rewards. Xi et al. [18] present $\mathbb{R}^3$ , a reverse curriculum RL approach using outcome supervision to mimic process supervision benefits. Wang et al. [19] propose CREAM, which enforces consistency regularization between internal reward models during self-training. These methods highlight a spectrum of supervision levels, positioning EMP0 as unique in its fully unsupervised nature, leveraging semantic entropy as an internal reward. + +Entropy Minimization and Semantic Consistency. 
Entropy minimization is a well-established technique in semi-supervised and unsupervised learning, with roots in traditional machine learning. Grandvalet and Bengio [20] demonstrate that minimizing entropy on unlabeled data can improve classification accuracy by encouraging model confidence. Test-time adaptation methods like Tent [21] adapt models to new domains by minimizing entropy on test data, filling domain gaps without additional labels. More recent work, COME, [22] extends this principle to conservative entropy minimization for robust adaptation. These approaches highlight the potential of entropy minimization as an unsupervised objective, which EMP0 leverages for LLM reasoning by extending it to semantic entropy [23] in a latent space. Farquhar et al. [24] further validate semantic entropy's utility in detecting hallucinations, reinforcing its relevance. Kharitonov et al. [25] explore entropy minimization in emergent languages, finding it naturally aligns with successful communication, providing additional theoretical foundation for EMP0. + +# 3 Method + +We propose an RL-based method to minimize the entropy of LLM generations in a latent semantic space for incentivizing its reasoning capability. We term our method Entropy-Minimized Policy Optimization (EMPO), which is devised in a fully unsupervised manner without any forms of external supervised information. + +# 3.1 Preliminaries + +Recent advancements in reinforcement learning have demonstrated remarkable breakthroughs in enhancing the reasoning capabilities of LLMs. Taking the representative RL technique Group Relative Policy Optimization (GRPO) [11] used by DeepSeek-R1-Zero [12] as an example. 
GRPO first samples a group of outputs $\{o_1, \dots, o_G\}$ from the policy model $\pi_{\theta}$ and then optimizes it by maximizing the following objective: + +$$ +\begin{array}{l} \mathcal {J} _ {\mathrm {G R P O}} = \mathbb {E} _ {[ q \sim P (Q), \{o _ {i} \} _ {i = 1} \sim \pi_ {\theta (O | q)} ]} \\ \left[ \frac {1}{G} \sum_ {i = 1} ^ {G} \left(\min \left(A _ {i}, \operatorname {c l i p} (1, 1 - \epsilon , 1 + \epsilon) A _ {i}\right) - \beta K L \left(\pi_ {\theta} \mid \mid \pi_ {r e f}\right) \right. \right], \tag {1} \\ \end{array} +$$ + +where $\beta$ is a hyper-parameter which avoids the policy model to diverge too far away from the reference model $\pi_{ref}$ . $\epsilon$ clips extreme advantages for stability. $G$ is the number of samples in one group. $A_{i}$ is the advantage computed by normalizing the rewards within each group as $A_{i} = \frac{r_{i} - mean(\{r_{1},\cdots,r_{G}\})}{std(r_{1},\cdots,r_{G})}$ . In math reasoning task, the reward can be computed by predefined rules: + +$$ +r _ {i} = \left\{ \begin{array}{l l} 1 & \text {i f v e r i f i e r} (o _ {i}, a) = \text {T r u e} \\ 0 & \text {o t h e r w i s e} \end{array} , \right. \tag {2} +$$ + +where a verifier is used to determine the correctness of $o_i$ by comparing it with the golden answer $a$ . + +Unlike the above example, we consider fully unsupervised optimization settings where there are no golden answers to verify the correctness of model predictions. In this circumstance, we only have unlabeled reasoning problems $P(Q)$ . Such problems were freely raised by users during the deployment of LLMs. Given a pre-training LLM $\pi_{\theta}$ parameterized by $\theta$ , our goal is to enhance its reasoning ability by only utilizing the unlabeled user problems $\{q_i\}_{i=1}^n$ , which requests minimized cost of data collection. 
+ +# 3.2 Semantic Entropy Minimization Objective + +Entropy is a classical unsupervised objective in the traditional semi-supervised and unsupervised learning fields [20, 26]. Previous works in computer vision show that by continuously minimizing the entropy on unlabeled samples after pre-training, the classification accuracy of machine learning models can be significantly improved to fill the domain gaps [21, 22]. The basic intuition behind entropy minimization is that a robust model should not only fit labeled data well but also make confident and consistent predictions on unlabeled data. This principle encourages the model to avoid ambiguity and make decisive predictions, thereby enhances generalization. In this work, we choose semantic entropy [23] as our unsupervised optimization objective, which is a natural extension of classical Shannon entropy specified for large language models. Intuitively speaking, minimizing semantic entropy encourages the LLMs' outputs to be more consistent in semantic level rather than format, and thus the final answers are expected to be more reliable. + +Specifically, semantic entropy first samples a group of outputs $\{o_1,\dots ,o_G\}$ and then clusters the output sequences according to their meaning. That is, if two outputs share the same meaning (i.e., they are bidirectionally entailed), they should be merged into one same cluster in the semantic space. This can be done without notable computational cost by predefined rules such as N-gram, regular expressions or an additional small language model. Once built such a set of meaning clusters $\{c\}$ in semantic space, we then approximate the probability over the meanings as the proportion of sampled answers as + +$$ +p \left(c _ {j} \mid x\right) \approx \left| c _ {j} \right| / G, \tag {3} +$$ + +where $c_{j} \in \{c\}$ is the $j$ -th meaning cluster. $|c_{j}|$ denotes the numbers of outputs that belong to $c_{j}$ . 
Finally, given question $q$ , the semantic entropy (denoted as $H$ ) over the model's output meanings + +distribution can be estimated as follows + +$$ +H = - \sum_ {c _ {j} \in \{c \}} p (c _ {j} | q) \log p (c _ {j} | q). \tag {4} +$$ + +As proven by previous work, semantic entropy has a strong negative relationship with model accuracy, which can be used as an efficient measurement to detect unreliable LLM generations such as confabulation and hallucination [23, 24]. Motivated by this, we propose to leverage semantic entropy as an unsupervised optimization objective for incentivizing the reasoning capability of LLM. + +# 3.3 Entropy-Minimized Policy Optimization + +We propose Entropy-Minimized Policy Optimization (EMPO), an RL-based method that optimizes the pre-trained large language model $\pi_{\theta}$ to favor low semantic entropy responses given unlabeled user questions $\{q_i\}_{i=1}^n$ . Given input questions, EMPO incentivizes the outputs that belong to higher probability meaning cluster, and thus minimizes the semantic entropy over the meaning distribution. Specifically, given a question $q$ , our EMPO first samples a group of output $\{o_1, \ldots, o_G\}$ from the current model $\pi_{\theta}$ and then merges them into a set of $M$ meaning clusters $\{c_1, \ldots, c_M\}$ . As we mentioned before, this can be done without notable computational cost (please refer to the quantitative results in Appendix F) by predefined rules such as N-gram, regular expressions or an additional small language model (SLM) $^1$ . 
Once built such a meaning set, EMPO approximately minimizes the semantic entropy $H$ by maximizing the following objective
+
+$$
+\mathcal{J}_{\mathrm{EMPO}} = \mathbb{E}_{[\{q\} \sim P(Q), \{o_{i}\}_{i=1}^{G} \sim \pi_{\theta}(O|q)]} \frac{1}{|G|} \sum_{i=1}^{|G|} \left(A_{i}\right), \quad A_{i} = \frac{r_{i} - \mathrm{mean}\left(\left\{r_{1}, \cdots, r_{G}\right\}\right)}{\operatorname{std}\left(r_{1}, \cdots, r_{G}\right)} \tag{5}
+$$
+
+where $A_{i}$ is the advantage of output $o_{i}$ calculated by normalizing the rewards. Unlike GRPO, in which the reward is calculated depending on external supervision such as pre-defined rules or a reward model, in EMPO, the reward assigned to the $i$-th output $o_{i}$ is the likelihood of its meaning cluster, i.e.,
+
+$$
+r_{i} = p\left(c_{j} \mid q\right), \text{ where } l\left(o_{i}\right) = c_{j}, \tag{6}
+$$
+
+where the meaning likelihood $p(c_{j}|q)$ is approximated by Eq. 3. Intuitively, the outputs that convey higher-probability meanings have higher advantages, and are therefore incentivized through training.
+
+How to Mitigate Potential Reward Hacking? Note that, different from verifiable rule-based rewards, which inherently resist reward hacking risks, optimizing unsupervised entropy objectives may permit trivial solutions. For instance, models could exploit the reward signal by overfitting to high-confidence but wrong predictions for the most frequent semantic clusters without a careful reasoning process. To address this, we implement a straightforward entropy thresholding strategy, restricting optimization to prompts exhibiting moderate uncertainty via dual threshold criteria. Specifically, two entropy thresholds are deployed to filter out user queries $q$ that result in overly high or low entropy unreliable answers. Extremely high entropy indicates that the model is highly uncertain, and thus its predictions are prone to be unreliable.
In addition, continuously optimizing on responses with already low entropy is redundant and at the risk of overconfidence [27]. The final optimization objective of EMPO is
+
+$$
+\mathcal{J}_{\mathrm{EMPO}} = \mathbb{E}_{[\{q\} \sim P(Q), \{o_{i}\}_{i=1}^{G} \sim \pi_{\theta}(O|q)]}
+$$
+
+$$
+\left[\frac{1}{|G|} \sum_{i=1}^{|G|} \left(\min\left(A_{i}, \operatorname{clip}(1, 1-\epsilon, 1+\epsilon) A_{i}\right)\right.\right], \tag{7}
+$$
+
+$$
+\mathrm{s.t.}\ \delta_{low} < H < \delta_{high}
+$$
+
+where $H$ is the semantic entropy defined in Eq. 4. The questions resulting in highly unreliable answers with entropy greater than $\delta_{high}$ are filtered out. Besides, we also filter out low-entropy answers to maintain the diversity of model outputs and further avoid potential reward hacking. Following previous work [28], we remove the KL constraint for better performance. $\epsilon$ clips extremely high or low advantages for stability, similar to common practice.
+
+# 4 Experiments
+
+# 4.1 Experimental Settings
+
+We conduct experiments on multiple datasets including both closed-form math reasoning tasks and free-form natural reasoning tasks. Our EMPO shows competitive performance by pure RL in a fully unsupervised manner compared to supervised finetuning and RL methods.
+
+Prompt Collection and Data Engineering. For mathematical reasoning, following the common practice [29, 8, 30], we adopt 20,000 prompts randomly selected from the NuminaMath-CoT dataset [31] for training $^{2}$ without additional data engineering. For free-form natural reasoning tasks, we adopt the prompts from Natural Reasoning $^{3}$ , a large-scale dataset consisting of diverse reasoning questions from multiple domains (e.g., Physics, Computer Science, Economics, Social Sciences and more). For training efficiency, we filter out the questions with over-long prompts or reference answers.
Besides, taking inspiration from [32, 33, 34], we use the response length of Llama3.3-70B-Instruct as a difficulty estimation metric, and filter out samples with response lengths exceeding 4096 tokens. The remaining samples are simpler for stabilizing the training process. The final training subset consists of 18,000 questions. More details can be found in Appendix G.
+
+Evaluation. For mathematical reasoning, the performance is evaluated on a diverse suite of benchmarks including Minerva Math, MATH, AMC23, OlympiadBench and AIME24. The evaluation codebase is borrowed from the SimpleRL project [35], which is consistent with other concurrent works [30]. For free-form natural reasoning, we evaluate on MMLU-Pro [36] and GPQA [37] benchmarks, which consist of challenging reasoning-focused problems across various subjects, e.g., biology, business, chemistry, computer science and so on. We prompt the model to reason step by step and output the final answer within "\boxed{}" and report the multi-choice accuracy. Unless otherwise specified, all evaluations are conducted using zero-shot prompting and greedy decoding.
+
+Model training. For mathematical reasoning tasks, we train Qwen2.5-Math-1.5B and 7B Base models with our EMPO. The baselines we consider include supervised finetuning (SFT), online direct preference optimization (ODPO) [30] and the representative GRPO. We also compare with Qwen2.5-Math Instruction models for a more comprehensive comparison, where the instruction model is trained by iterative supervised finetuning and RL on private data. For free-form natural reasoning tasks, we initialize from Qwen2.5-3B, 7B and 14B Base models. Different from mathematical reasoning, it is difficult to adopt rule-based reward for free-form question-answering tasks without deterministic golden answers. We consider the corresponding Instruct model, the Base model with or without few-shot CoT prompt as baselines.
Besides, we also compare with SFT where the Base model is tuned to fit the response of Llama3.3-70B-Instruct. For more results on other model families beyond the Qwen series (e.g., Llama3), please refer to the Appendix D.
+
+- SFT: We train models by supervised finetuning via Open-Instruct [38] with a fixed learning rate of $1 \times 10^{-6}$ , a global batch size of 128 and train for 1 epoch with a max length of 2048.
+- GRPO: We implement GRPO via TRL [39] based on Open-R1 [29]. We sample 7 and 12 responses for each prompt for mathematical and natural reasoning tasks respectively. We train the model for 3 epochs with a maximum generation length of 2048. Following [40], we only use the rule-based accuracy reward and do not adopt format-reward. The accuracy reward is implemented as follows: If the response contains the correct final answer within "boxed{}", it receives a reward of 1. If the model prediction is wrong, it receives a reward of 0. When no answer can be extracted from the model's response, the reward is $-0.5$ .
+- Online-DPO: Recent advanced Online-DPO first samples a set of responses and then verifies and selects the responses with highest reward and lowest reward as a preference pair. We directly copy the results from [30], where the model is trained for 7 iterations. Each iteration involves 2 training epochs and 20K training samples, i.e., 140K training samples in total.
+- EMPO: Most hyper-parameters of our method, e.g., number of generations, max generation length, batch size, learning rate are the same as GRPO. In mathematical reasoning tasks, we use a set of regular expressions to merge the outputs into meaning clusters. For more general free-form natural reasoning, we leverage General-Verifier $^4$ (a compact small language model with 1.5B parameters) to determine whether two outputs are of the same meaning or not following [23, 24]. A concrete example can be found in Appendix C.
Specifically, if the final predictions (i.e., the contents within "\boxed{}") of two model outputs are bidirectionally implicating, then we merge them into one semantic cluster ignoring their reasoning traces. More details are in Appendix E. + +# 4.2 Main Results + +# 4.2.1 Performance on Mathematical Reasoning Tasks. + +We conduct experiments on mathematical tasks to evaluate our method. The main results are shown in Table 1. EMP0 has successfully incentivized the Qwen2.5-Math Base model with reasoning capability without dependency on any external supervision. We observe a substantial improvement in the average performance on commonly used mathematical reasoning benchmarks from $28.1\%$ to $42.1\%$ and $30.7\%$ to $48.1\%$ on 1.5B and 7B models, respectively. Notably, through fully unsupervised RL training, the 1.5B and 7B model has both achieved competitive performance (42.1% and $48.1\%$ ) near to Qwen2.5-Math-Instruct (40.5% and $49.4\%$ ), where the latter depends on private dataset and multi-stage iteratively supervised fine-tuning and reinforcement learning. + +Table 1: Accuracy on mathematical reasoning benchmarks. We report the pass@1 accuracy tested with greedy decoding. The results of ODPO are directly copied from [30]. Here $q, r, a$ denote the dependency on questions, human-verified reasoning traces and golden answers respectively. + +
SupervisionMATHMinerva MathOlympiad BenchAIME24AMC23Avg.
frontier model
Llama-3.1-70B-Instruct{q,r,a}64.635.331.916.730.135.7
Eurus-2-7B-PRIME{q,r,a}79.238.642.126.757.848.9
1.5B model
Qwen2.5-MathNone52.210.725.210.042.528.1
Qwen2.5-Math-Instruct{q,r,a}73.830.938.76.752.540.5
Qwen2.5-Math w/SFT{q,r,a}61.826.127.13.337.531.2
Qwen2.5-Math w/GRPO{q,a}75.232.033.616.752.542.0
Qwen2.5-Math w/EMPO{q}73.032.436.613.355.042.1
7B model
Qwen2.5-MathNone64.815.126.76.740.030.7
Qwen2.5-Math Instruct{q,r,a}82.843.841.216.762.549.4
Qwen2.5-Math w/SFT{q,r,a}72.234.633.210.045.039.0
Qwen2.5-Math w/ODPO{q,a}76.830.937.926.762.547.0
Qwen2.5-Math w/GRPO{q,a}77.839.739.120.057.546.8
Qwen2.5-Math w/EMPO{q}78.040.437.320.065.048.1
+ +# 4.2.2 Performance on Natural Free-form Reasoning Tasks. + +We present the results on free-form natural reasoning tasks in Table 2. On the MMLU-Pro benchmark, our EMP0 improves the accuracy from $32.1\%$ to $50.1\%$ and $32.7\%$ to $58.8\%$ on Qwen2.5-7B and 14B Base model respectively. Besides, on more challenging GPQA benchmark, EMP0 results in increasing accuracy from $15.9\%$ to $28.8\%$ on 7B model, $30.6\%$ to $35.3\%$ on 14B model. Notably, we observe that the SFT baseline fails to consistently improve model performance. We hypothesize that this is due to the noise in the reference responses within the Natural Reasoning training data (as mentioned by [32]). This phenomenon further underscores the practical potential of our proposed method. + +# 4.2.3 Training Dynamics + +We further conduct experiments to investigate the reliability of our unsupervised reward signals. As shown in Figure 3, the unsupervised reward signals of EMP0 have a strongly negative correlation with the true rewards based on golden answers. Thus, by continuously minimizing the semantic entropy objective, the model can boost its accuracy in a fully unsupervised manner. + +Table 2: Accuracy results on free-form natural reasoning benchmarks. We report pass@1 accuracy tested with greedy decoding. Here $\{q,r,a\}$ denote the dependency on questions, human-verified reasoning traces and verifiable golden answers respectively. + +
SupervisionMMLU ProGPQA
STEMHumanitiesSocialOtherAvg.
3B model
Qwen2.5-Base-8.325.357.424.156.8311.2
Qwen2.5-Base 5-shot{q,r,a}34.726.247.935.935.313.8
Qwen2.5-Instruct{q,r,a}44.830.756.047.144.528.2
Qwen2.5-Base w/SFT{q,r,a}19.810.428.018.419.111.5
Qwen2.5-Base w/GRPO{q,a}32.227.749.838.735.217.1
Qwen2.5-Base w/EMPO{q}31.726.248.136.734.120.6
7B model
Qwen2.5-Base-30.123.845.934.332.115.9
Qwen2.5-Base 5-shot{q,r,a}45.736.359.149.446.823.5
Qwen2.5-Instruct{q,r,a}56.938.164.158.655.235.3
Qwen2.5-Base w/SFT{q,r,a}32.67.115.830.125.622.4
Qwen2.5-Base w/GRPO{q,a}57.136.264.456.654.533.8
Qwen2.5-Base w/EMPO{q}52.434.659.050.950.128.8
14B model
Qwen2.5-Base-30.828.044.433.032.730.6
Qwen2.5-Base 5-shot{q,r,a}51.935.863.454.451.433.2
Qwen2.5-Instruct{q,r,a}63.647.173.866.762.942.9
Qwen2.5-Base w/SFT{q,r,a}37.027.840.238.036.128.5
Qwen2.5-Base w/GRPO{q,a}62.942.168.659.859.635.6
Qwen2.5-Base w/EMPO{q}61.441.668.360.058.835.3
+ +![](images/54ea7ba11ac679ed9b70c45139b15bb0c494d20f9cfe7b9ba7e519498e1d1d00.jpg) +Figure 3: We visualize the training dynamics when tuning Qwen2.5-Math-7B Base model with EMP0 on 20K prompts randomly sampled from NuminaMath-CoT. The left illustrates the running average of semantic entropy (Eq. 4). The middle shows the trend of our unsupervised reward as defined by Eq. 6. The right shows the model accuracy on training data at each RL steps. Along the unsupervised RL-based training trajectory, EMP0 establishes a stable learning process with consistently decreased semantic entropy and improved accuracy. + +![](images/075188f39a5ebdacf50005cb09d7e38cd4328467bddde5289c5e00402e56d320.jpg) + +![](images/f93c2ee71c0adf195cf26c5902acef4260f08f79adba738f3dcae5c452ddfc31.jpg) + +# 5 Discussion and Conclusion: The Role of Unsupervised Learning in Eliciting Pre-Trained Reasoning Capabilities + +The strong empirical performance of EMP0, particularly its ability as a fully unsupervised method to match or even slightly outperform supervised counterparts like GRPO (as observed with the 7B model), prompts a deeper examination of how such reasoning incentivization mechanisms work. This is especially pertinent given the counterintuitive observation that these substantial improvements on benchmarks are achieved without a consistent increase in response length or clear evidence of an "Aha moment" – a hypothesized sudden emergence of enhanced reasoning capabilities. + +To dissect the nature of the improvements conferred by reinforcement learning (RL) post-training, we investigated its influence on pass@k accuracy. This metric is crucial as recent studies [41, 42] suggest that RL may not fundamentally expand the inherent reasoning capacities of LLMs beyond those already embedded in their pre-trained base. As depicted in Figure 4, our findings align with this perspective. 
Both GRPO and EMP0 significantly enhance pass@k scores for small to moderate + +![](images/335efb518b469c6cc68204d57510d455982bcad4c3c5a141a0b1137dc465080b.jpg) +Figure 4: Pass@k curves of Qwen2.5-Math-7B Base model and its counterparts trained with GRPO and our EMP0 on Minerva Math and OMNI reasoning benchmarks. Pass@k measures the probability that at least 1 of the top $k$ generated solutions is correct. Pass@1 is equivalent to accuracy, as it checks if the single solution is correct. When $k$ is small, RL-trained models outperform the original base model. However, as $k$ increases (e.g., into the tens or hundreds), the performance of the base models often converges with, or even exceeds, that of the RL-trained models. + +![](images/9954ebe5c2ce338a3ab700264b280f56e5763e1ff349dfed0fa4106fd524e1d7.jpg) + +values of k (e.g., $k = 16$ or 32) compared to the base model. This demonstrates an improved efficiency in surfacing correct reasoning paths with fewer attempts. However, as k becomes substantially large, the performance of these RL-trained models tends to converge with, and is sometimes surpassed by, that of the base model. + +This convergence at high $k$ values, coupled with our qualitative observations that the base models themselves already exhibit sophisticated reasoning behaviors such as pausing, self-correction, and backtracking (see Appendix for examples), strongly indicates that the foundational reasoning pathways are largely pre-existing. Consequently, RL post-training, whether supervised or unsupervised like EMP0, appears to primarily refine the model's ability to efficiently access, prioritize, and consistently select these latent reasoning patterns, rather than instilling fundamentally novel ones. The observed improvements in pass@1 (accuracy) are thus likely a consequence of this enhanced sampling efficiency. 
+ +These empirical insights from the pass@k analysis lend considerable support to the emerging consensus that pre-training shoulders the primary burden of endowing LLMs with their core abilities. We align our interpretation with prior insights from [43]: "Pretraining does all the hard work. One big bet is that the pretraining phase grants all the abilities to the base LM, and finetuning is simply like a style transfer which positions the model to the right output space." Under this conjecture (or more precisely, an emerging, but not yet unanimously accepted consensus [41]), we attribute the efficacy of our method to the robust pretraining process of the Qwen2.5 Base model: If a base model possesses strong inherent reasoning capabilities, the subsequent challenge is not necessarily to teach it new reasoning skills from scratch, but rather to effectively elicit and guide these existing skills. + +EMPO's success highlights that intrinsic reward signals, derived purely from the model's objective to minimize semantic entropy and thus achieve greater consistency in its outputs, can be surprisingly potent for this elicitation process. In a well-pre-trained model, outputs that are semantically consistent are more likely to align with correct and coherent reasoning. EMPO leverages this by incentivizing the model to favor such consistent outputs, effectively guiding it to refine its selection from its collection of existing reasoning strategies without requiring external validation of correctness. + +In conclusion, while RL techniques, including EMP0, may not be forging entirely new fundamental reasoning capabilities beyond what pre-training provides, their role in significantly enhancing the sampling efficiency and reliability of accessing these pre-trained abilities is of paramount practical importance. Optimizing models for such efficiency is crucial for real-world applications. 
EMP0, by achieving this through a fully unsupervised framework, offers a particularly scalable, cost-effective, and practical approach to unlocking and refining the vast reasoning potential embedded within pre-trained LLMs, especially in domains where curated supervisory data is scarce or prohibitively expensive to obtain. + +# References + +[1] Xinyu Guan, Li Lyna Zhang, Yifei Liu, Ning Shang, Youran Sun, Yi Zhu, Fan Yang, and Mao Yang. rstar-math: Small llms can master math reasoning with self-evolved deep thinking. arXiv preprint arXiv:2501.04519, 2025. +[2] Ganqu Cui, Lifan Yuan, Zefan Wang, Hanbin Wang, Wendi Li, Bingxiang He, Yuchen Fan, Tianyu Yu, Qixin Xu, Weize Chen, et al. Process reinforcement through implicit rewards. arXiv preprint arXiv:2502.01456, 2025. +[3] Lifan Yuan, Wendi Li, Huayu Chen, Ganqu Cui, Ning Ding, Kaiyan Zhang, Bowen Zhou, Zhiyuan Liu, and Hao Peng. Free process rewards without process labels. arXiv preprint arXiv:2412.01981, 2024. +[4] Dejian Yang Daya Guo, Qihao Zhu. Deepseek-coder: When the large language model meets programming – the rise of code intelligence, 2024. +[5] An Yang, Beichen Zhang, Binyuan Hui, Bofei Gao, Bowen Yu, Chengpeng Li, Dayiheng Liu, Jianhong Tu, Jingren Zhou, Junyang Lin, et al. Qwen2. 5-math technical report: Toward mathematical expert model via self-improvement. arXiv preprint arXiv:2409.12122, 2024. +[6] Jiaxin Huang, Shixiang Shane Gu, Le Hou, Yuexin Wu, Xuezhi Wang, Hongkun Yu, and Jiawei Han. Large language models can self-improve. arXiv preprint arXiv:2210.11610, 2022. +[7] Ilia Shumailov, Zakhar Shumaylov, Yiren Zhao, Nicolas Papernot, Ross Anderson, and Yarin Gal. Ai models collapse when trained on recursively generated data. Nature, 631(8022):755-759, 2024. +[8] Fangkai Jiao, Geyang Guo, Xingxing Zhang, Nancy F Chen, Shafiq Joty, and Furu Wei. Preference optimization for reasoning with pseudo feedback. arXiv preprint arXiv:2411.16345, 2024. 
+[9] Wei Xiong, Hanning Zhang, Chenlu Ye, Lichang Chen, Nan Jiang, and Tong Zhang. Selfrewarding correction for mathematical reasoning. arXiv preprint arXiv:2502.19613, 2025. +[10] John Schulman, Filip Wolski, Prafulla Dhariwal, Alec Radford, and Oleg Klimov. Proximal policy optimization algorithms. arXiv preprint arXiv:1707.06347, 2017. +[11] Zhihong Shao, Peiyi Wang, Qihao Zhu, Runxin Xu, Junxiao Song, Xiao Bi, Haowei Zhang, Mingchuan Zhang, YK Li, Y Wu, et al. Deepseekmath: Pushing the limits of mathematical reasoning in open language models. arXiv preprint arXiv:2402.03300, 2024. +[12] Daya Guo, Dejian Yang, Haowei Zhang, Junxiao Song, Ruoyu Zhang, Runxin Xu, Qihao Zhu, Shirong Ma, Peiyi Wang, Xiao Bi, et al. Deepseek-r1: Incentivizing reasoning capability in llms via reinforcement learning. arXiv preprint arXiv:2501.12948, 2025. +[13] Ajay Patel, Markus Hofmarcher, Claudiu Leoveanu-Condrei, Marius-Constantin Dinu, Chris Callison-Burch, and Sepp Hochreiter. Large language models can self-improve at web agent tasks. arXiv preprint arXiv:2405.20309, 2024. +[14] Siheng Li, Cheng Yang, Zesen Cheng, Lemao Liu, Mo Yu, Yujiu Yang, and Wai Lam. Large language models can self-improve in long-context reasoning. arXiv preprint arXiv:2411.08147, 2024. +[15] Yuxin Zuo, Kaiyan Zhang, Shang Qu, Li Sheng, Xuekai Zhu, Biqing Qi, Youbang Sun, Ganqu Cui, Ning Ding, and Bowen Zhou. Trl: Test-time reinforcement learning. arXiv preprint arXiv:2504.16084, 2025. +[16] Weizhe Yuan, Richard Yuanzhe Pang, Kyunghyun Cho, Xian Li, Sainbayar Sukhbaatar, Jing Xu, and Jason Weston. Self-rewarding language models, 2024. +[17] Muning Wen, Cheng Deng, Jun Wang, Weinan Zhang, and Ying Wen. Entropy-regularized token-level policy optimization for large language models. arXiv e-prints, pages arXiv-2402, 2024. +[18] Zhiheng Xi, Wenxiang Chen, Boyang Hong, Senjie Jin, Rui Zheng, Wei He, Yiwen Ding, Shichun Liu, Xin Guo, Junzhe Wang, et al. 
Training large language models for reasoning through reverse curriculum reinforcement learning. arXiv preprint arXiv:2402.05808, 2024. +[19] Zhaoyang Wang, Weilei He, Zhiyuan Liang, Xuchao Zhang, Chetan Bansal, Ying Wei, Weitong Zhang, and Huaxiu Yao. CREAM: Consistency regularized self-rewarding language models. In The Thirteenth International Conference on Learning Representations, 2025. + +[20] Yves Grandvalet and Yoshua Bengio. Semi-supervised learning by entropy minimization. Advances in neural information processing systems, 17, 2004. +[21] Dequan Wang, Evan Shelhamer, Shaoteng Liu, Bruno Olshausen, and Trevor Darrell. Tent: Fully test-time adaptation by entropy minimization. arXiv preprint arXiv:2006.10726, 2020. +[22] Qingyang Zhang, Yatao Bian, Xinke Kong, Peilin Zhao, and Changqing Zhang. Come: Test-time adaption by conservatively minimizing entropy. arXiv preprint arXiv:2410.10894, 2024. +[23] Lorenz Kuhn, Yarin Gal, and Sebastian Farquhar. Semantic uncertainty: Linguistic invariances for uncertainty estimation in natural language generation. arXiv preprint arXiv:2302.09664, 2023. +[24] Sebastian Farquhar, Jannik Kossen, Lorenz Kuhn, and Yarin Gal. Detecting hallucinations in large language models using semantic entropy. Nature, 630(8017):625-630, 2024. +[25] Eugene Kharitonov, Rahma Chaabouni, Diane Bouchacourt, and Marco Baroni. Entropy minimization in emergent languages. In International Conference on Machine Learning, pages 5220-5230. PMLR, 2020. +[26] Ori Press, Ravid Shwartz-Ziv, Yann LeCun, and Matthias Bethge. The entropy enigma: Success and failure of entropy minimization. arXiv preprint arXiv:2405.05012, 2024. +[27] Soren Mindermann, Jan M Brauner, Muhammed T Razzak, Mrinank Sharma, Andreas Kirsch, Winnie Xu, Benedikt Holgen, Aidan N Gomez, Adrien Morisot, Sebastian Farquhar, et al. Prioritized training on points that are learnable, worth learning, and not yet learnt. In International Conference on Machine Learning, pages 15630-15649. 
PMLR, 2022. +[28] Qiying Yu, Zheng Zhang, Ruofei Zhu, Yufeng Yuan, Xiaochen Zuo, Yu Yue, Tiantian Fan, Gaohong Liu, Lingjun Liu, Xin Liu, et al. Dapo: An open-source llm reinforcement learning system at scale. arXiv preprint arXiv:2503.14476, 2025. +[29] Hugging Face. Open r1: A fully open reproduction of deepseek-r1, January 2025. +[30] Hanning Zhang, Jiarui Yao, Chenlu Ye, Wei Xiong, and Tong Zhang. Online-dpo-r1: Unlocking effective reasoning without the ppo overhead, 2025. Notion Blog. +[31] Jia LI, Edward Beeching, Lewis Tunstall, Ben Lipkin, Roman Soletskyi, Shengyi Costa Huang, Kashif Rasul, Longhui Yu, Albert Jiang, Ziju Shen, Zihan Qin, Bin Dong, Li Zhou, Yann Fleureau, Guillaume Lample, and Stanislas Polu. Numinamath. [https://huggingface.co/AI-MO/NuminaMath-CoT](https://github.com/project-numina/aimo-progress-prize/blob/main/report/numina_dataset.pdf), 2024. +[32] Weizhe Yuan, Jane Yu, Song Jiang, Karthik Padthe, Yang Li, Dong Wang, Ilia Kulikov, Kyunghyun Cho, Yuandong Tian, Jason E Weston, et al. Naturalreasoning: Reasoning in the wild with $2.8\mathrm{m}$ challenging questions. arXiv preprint arXiv:2502.13124, 2025. +[33] Xingyu Chen, Jiahao Xu, Tian Liang, Zhiwei He, Jianhui Pang, Dian Yu, Linfeng Song, Qiuzhi Liu, Mengfei Zhou, Zhuosheng Zhang, et al. Do not think that much for $2 + 3 = ?$ on the overthinking of o1-like llms. arXiv preprint arXiv:2412.21187, 2024. +[34] Huan Ma, Jingdong Chen, Guangyu Wang, and Changqing Zhang. Estimating llm uncertainty with logits. arXiv preprint arXiv:2502.00290, 2025. +[35] Weihao Zeng, Yuzhen Huang, Wei Liu, Keqing He, Qian Liu, Zejun Ma, and Junxian He. 7b model and 8k examples: Emerging reasoning with reinforcement learning is both effective and efficient. https://hkust-nlp.notion.site/simplerl-reason, 2025. Notion Blog. +[36] Yubo Wang, Xueguang Ma, Ge Zhang, Yuansheng Ni, Abhranil Chandra, Shiguang Guo, Weiming Ren, Aaran Arulraj, Xuan He, Ziyan Jiang, et al. 
Mmlu-pro: A more robust and challenging multi-task language understanding benchmark. In The Thirty-eight Conference on Neural Information Processing Systems Datasets and Benchmarks Track, 2024. +[37] David Rein, Betty Li Hou, Asa Cooper Stickland, Jackson Petty, Richard Yuanzhe Pang, Julien Dirani, Julian Michael, and Samuel R Bowman. Gpqa: A graduate-level google-proof q&a benchmark. In First Conference on Language Modeling, 2024. +[38] Nathan Lambert, Jacob Morrison, Valentina Pyatkin, Shengyi Huang, Hamish Ivison, Faeze Brahman, Lester James V. Miranda, Alisa Liu, Nouha Dziri, Shane Lyu, Yuling Gu, Saumya Malik, Victoria Graf, Jena D. Hwang, Jiangjiang Yang, Ronan Le Bras, Oyvind Tafjord, Chris + +Wilhelm, Luca Soldaini, Noah A. Smith, Yizhong Wang, Pradeep Dasigi, and Hannaneh Hajishirzi. Tulu 3: Pushing frontiers in open language model post-training. 2024. +[39] Leandro von Werra, Younes Belkada, Lewis Tunstall, Edward Beeching, Tristan Thrush, Nathan Lambert, Shengyi Huang, Kashif Rasul, and Quentin Gallouédec. Trl: Transformer reinforcement learning. https://github.com/huggingface/trl, 2020. +[40] Jingcheng Hu, Yinmin Zhang, Qi Han, Daxin Jiang, and Heung-Yeung Shum Xiangyu Zhang. Open-reasoner-zero: An open source approach to scaling reinforcement learning on the base model. https://github.com/Open-Reasoner-Zero/Open-Reasoner-Zero, 2025. +[41] Yang Yue, Zhiqi Chen, Rui Lu, Andrew Zhao, Zhaokai Wang, Shiji Song, and Gao Huang. Does reinforcement learning really incentivize reasoning capacity in llms beyond the base model? arXiv preprint arXiv:2504.13837, 2025. +[42] Yuda Song, Hanlin Zhang, Carson Eisenach, Sham Kakade, Dean Foster, and Udaya Ghai. Mind the gap: Examining the self-improvement capabilities of large language models. arXiv preprint arXiv:2412.02674, 2024. +[43] Zhengxuan Wu, Aryaman Arora, Zheng Wang, Atticus Geiger, Dan Jurafsky, Christopher D Manning, and Christopher Potts. Reft: Representation finetuning for language models. 
Advances in Neural Information Processing Systems, 37:63908-63962, 2024. +[44] George Casella and Roger Berger. Statistical inference. CRC press, 2024. + +# Appendices + +A Prompt Templates 13 +B Case Study 15 +C Implementation Details about Semantic Clustering 16 +D Additional Results on Llama3 Model Series 16 +E Additional Training Details 18 +F Computational Cost of Semantic Clustering 18 +G Details of Prompt Collection 19 +H Additional Result about Pass@k 19 +I The Influence of Clustering Quality on the Performance of EMPO 19 + +# A Prompt Templates + +We provide the prompt templates used for training and evaluation. + +For mathematical reasoning tasks, we adopt the following reasoning prompt template similar to Online-DPO-R1 project [30] for both training and testing. During testing, we found that by adding system prompt, the accuracy of Qwen2.5-Math Base model can be better on mathematical benchmarks. However, system prompt would not help in natural reasoning tasks. Thus we use the same test prompt (start with system prompt) for both Base model and finetuned models in mathematical tasks. In natural reasoning tasks, we do not add system prompt for Base models. + +# Mathematical Reasoning Training and Evaluation Template + +```txt +system +Please reason step by step, and output your final answer within \boxed{}}. + +user +{Question} Let's think step by step and output the final answer within \boxed{}}. + +assistant +``` + +To train models with our EMPO for free-form natural reasoning tasks, we adopt the following reasoning prompt template similar to that we used in mathematical tasks for training. + +# Free-form Natural Reasoning Training Template + +```txt +system +Reason step by step, and output your final answer within \boxed{}?. + +user +{Question} Reason step by step and output the final answer within \boxed{}?. + +assistant +``` + +Since the MMLU-Pro and GPQA are both close-formed multi-choice benchmark. 
To evaluate the natural reasoning capability of the models, we use the following prompt template during testing. + +# MMLU Pro Test Template for Base Models + +Question: {Question} Reason step by step and output the final answer (the correct letter choice from A-P) within \boxed{}}. + +Answer: + +# Few Shot MMLU Pro Test Template + +Question: {Question in Demonstration 1} Reason step by step and output the final answer (the correct letter choice from A-P) within \boxed{} + +Answer: Let's reason step by step. CoT of Demonstration 1 Therefore, the correct answer is Answer of Demonstration 1. + +(Omit more demonstrations for readability) + +... Question: {Question} Reason step by step and output the final answer (the correct letter choice from A-P) within \boxed{}}. + +Answer: + +# MMLU Pro Test Template for Finetuned Models (SFT and RL) + +system +Reason step by step, and output your final answer (the correct letter choice from A-P) within +\boxed{} + + + +user + +{Question} Reason step by step and output the final answer (the correct letter choice from A-P) within \boxed{} + + + +assistant + +# GPQA Test Prompt for Base Models + +Question: {Question} Reason step by step and output the final answer (A, B, C, or D) within \boxed{} + +Answer: + +# Few Shot GPQA Test Prompt for Base Models + +Question: {Question in Demonstration 1} Reason step by step and output the final answer (A, B, C, or D) within \boxed{} + +Answer: Let's reason step by step. CoT in Demonstration 1 Therefore, the correct final answer is Answer in Demonstration 1. + +(Omit more demonstration for readability) + +... 
Question: {Question} Reason step by step and output the final answer (A, B, C, or D) within \boxed{} + +Answer: + +# GPQA Test Prompt for Finetuned Models (SFT and RL) + +system + +Reason step by step, and output your final answer (A, B, C, or D) within \boxed{} + + + +user + +{Question} Reason step by step and output the final answer (A, B, C, or D) within \boxed{} + + + +assistant + +# B Case Study + +# Qwen2.5-Math-7B's Answer to the 18-th Question in AMC23 + +Question: Janet rolls a standard 6-sided die 4 times and keeps a running total of the numbers she rolls. What is the probability that at some point, her running total will equal 3? The final answer can be written in the form $\frac{m}{n}$ , where $m$ and $n$ are relatively prime positive integers. What is $m + n$ ? Response: To solve this problem, we need to determine the probability that at some point during 4 rolls of a standard 6-sided die, the running total will equal 3. We can use dynamic programming to keep track of the probabilities of each possible running total after each roll. Here's the plan: + +1. Initialize a list to keep track of the probability of each possible running total after each roll. +2. Start with the initial condition that the probability of a running total of 0 after 0 rolls is 1. +3. For each roll, update the probabilities of each possible running total by considering the outcomes of rolling a 1, 2, 3, 4, 5, or 6. +4. After each roll, check if the probability of a running total of 3 is non-zero. +5. The final answer will be the probability of a running total of 3 after 4 rolls. Let's implement this in Python: + +··· + +(We omit the python code for readability.) + +··· + +It looks like there was an issue with the implementation. The probability of the running total being exactly 3 after 4 rolls should not be zero, so let's correct the approach and re-implement the solution. Here's the corrected plan: + +1. 
Initialize a list to keep track of the probability of each possible running total after each roll. +2. Start with the initial condition that the probability of a running total of 0 after 0 rolls is 1. +3. For each roll, update the probabilities of each possible running total by considering the outcomes of rolling a 1, 2, 3, 4, 5, or 6. +4. After each roll, check if the probability of a running total of 3 is non-zero. +5. The final answer will be the probability of a running total of 3 after 4 rolls. + +Let's re-implement this in Python: + +··· + +(We omit the python code for readability.) + +It seems there is still an issue with the implementation. Let's manually check the logic and correct it step by step. + +1. Initialize the probability list with the initial condition that the probability of a running total of 0 after 0 rolls is 1. +2. For each roll, update the probabilities of each possible running total by considering the outcomes of rolling a 1, 2, 3, 4, 5, or 6. +3. After each roll, check if the probability of a running total of 3 is non-zero. +4. The final answer will be the probability of a running total of 3 after 4 rolls. Let's manually simulate the process: - After 0 rolls: Probability of 0 is 1 + +··· + +The probability of a running total of 3 after 4 rolls is $4 / 1296$ , which simplifies to $1 / 324$ . Therefore, the final answer in the form of $m / n$ is $1 / 324$ , and the sum $m + n$ is $1 + 324 = 325$ . The final answer is $\boxed{325}$ . + +# C Implementation Details about Semantic Clustering + +We detail the implementation of semantic clustering in our EMP0. The semantic cluster process is shown as Algorithm 1. We also provide the simplified python code to show how we determine whether two model outputs are equivalent or not in mathematical reasoning (Algorithm 2) and free-form natural reasoning tasks (Algorithm 3). 
+ +Algorithm 1: Semantic Clustering +```verilog +Require : question $q$ , a group set of model response $\{o_2,\dots,o_G\}$ , verifier $\nu$ Initialize: $C = \{o_1\}$ +for $2\leq i\leq G$ do +for $c\in C$ do // Random choose one element from $c$ for comparison $o_c = c[0]$ // Is the meaning of old sequence equivalent to new one? if $\mathcal{V}(q,o_c,o_i) ==$ True then // Put into existing class $c = c\cup \{o_i\}$ break +end +end +// $o_i$ is semantically distinct, belongs to a novel cluster. + $C\gets C\cup \{o_i\}$ +end +Return :C +``` + +Algorithm 2: Implementation of verifier for mathematical reasoning tasks. +```python +from math_VERIFY import parse, verify +def are_equivalent (model_output_1, model_output_2) prediction_1 $=$ parse(model_output_1) prediction_2 $=$ parse(model_output_2) return verify(prediction_1,prediction_2) +``` + +# D Additional Results on Llama3 Model Series + +We conduct additional experiments to validate the efficacy of our EMP0 on other model series beyond Qwen2.5. The results are shown in Table 3. Consistent with other concurrent practice, we are unable to implement R1-Zero-like training on the Llama series, i.e., directly initializing RL process from the Base model without SFT). Thus, we instead consider a semi-supervised learning approach by initializing from instruct-tuned model and enhance the reasoning capability with our EMP0. As shown in Table 3, when initialize from Llama3.2-3B-Instruct model, our EMP0 can also substantially improve reasoning capability of instruct-tuned model which have undergone carefully-designed post-training. + +# Why Qwen2.5 Base model can initialize fully unsupervised RL training, while Llama3 can not? + +Consistent with open-source community practices, we found that R1-Zero-like RL training can only be reproduced unsupervised on Qwen2.5 series Base models. In contrast, Llama3 series model still necessitate "cold-start", i.e., SFT, before RL. 
Specifically, in our experiments, the Qwen2.5 Base models demonstrated inherent answer consistency from the initial stages of EMPO training. However, Llama3 series Base models suffer severe inconsistency and fail to convergence during training. We hypothesize this divergence stems from Qwen2.5's pretraining strategy. As mentioned in the technical report [5], the pretrain data corpus are mixed with both web text and QA pairs generated by instruct-tuned Qwen2 models. This endows Qwen2.5 Base models with native instruction-following capabilities. Experimental evidence supports this hypothesis. As shown in Table 2, Qwen2.5 Base models successfully follow the instruction such as "put the final answer (A-P) within box" when answering multiple-choice questions from MMLU Pro and achieve an accuracy notably higher than random guess. + +Algorithm 3: Implementation of verifier for natural reasoning tasks. +```python +{ + verifier = AutoModelForCausalLM.from_pretrained(...); + tokenizer = AutoTokenizer.from_pretrained(...); +} +def are_equivalent(model_output_1, model_output_2, question, verifier) + prediction_1 = parse(model_output_1) + prediction_2 = parse(model_output_2) + prompt = ( + f"User: ## Question: {question}\n\n" + f"## Ground Truth Answer: {prediction_1}\n\n" + f"## Student Answer: {prediction_2}\n\n" + "For the above question, please verify if the student's answer is equivalent to the ground truth answer.\n" + "Do not solve the question by yourself; just check if the student's answer is equivalent to the ground truth answer.\n" + "If correct, output Final Decision: Yes". 
+ "If incorrect, output Final Decision: No\..\n" + "Assistant: Final Decision: " + ) + inputs = selftokenizer(modified_prompt, return_tensors="pt").to(self.model_device) + input_ids = inputs-input_ids + # inference for output logits + with torch.inference_mode(): + outputs = self.model.forward(input_ids) + logits = outputs.logits + # get next output logits + next_token_logits = logits[0, input_ids.shape[1] - 1, :] + # get the token ID of "Yes" and "No" + decision_tokens = selftokenizer("Yes", "No") + yes_id = decision_tokens.input_ids[0] + no_id = decision_tokens.input_ids[1] + # calculate probability + probs = torch softmax(next_token_logits, dim=0) + yes_prob = probs[yes_id].item() + no_prob = probs[no_id].item() + return yes_prob > no_prob +``` + +Table 3: Accuracy on mathematical reasoning benchmarks. + +
SupervisionMATHMinerva MathOMNIAIME24AMC23Avg.
frontier model
Llama-3.1-70B-Instruct{q,r,a}64.635.331.916.730.135.7
Eurus-2-7B-PRIME{q,r,a}79.238.642.126.757.848.9
1B model
Llama3.2-InstructNone27.25.15.60.010.09.6
Llama3.2-Instruct w/GRPO{q,a}29.83.76.40.012.510.5
Llama3.2-Instruct w/EMPO{q}31.05.17.93.37.511.0
3B model
Llama3.2-InstructNone46.219.115.33.320.020.8
Llama3.2-Instruct w/GRPO{q,a}49.222.417.613.332.527.0
Llama3.2-Instruct w/EMPO{q}49.820.218.413.330.026.3
+ +# E Additional Training Details + +We provide a brief summary of our training recipes in Table 4. Besides, we have release the code in the supplementary materials which contained the full training configurations for re-implementation. + +Table 4: A brief summary of training recipes of Qwen2.5 Base models. + +
1.5B-Math7B-Math3B7B14B
Number of generations77121212
Learning rate3e-73e-73e-73e-73e-7
Max completion length2048204810241024768
Batch size per GPU12111
+ +# F Computational Cost of Semantic Clustering + +Given the number of responses sampled per question $G$ (i.e., the group size) and the training dataset size $N$ , the time complexity of the clustering process is $O(G^2 \times N)$ . In mathematical reasoning tasks, semantic clustering is implemented by regular expressions which do not involve notable computational cost. For natural reasoning tasks, we rely on an additional compact small language model. To evaluate the additional computational overhead introduced by semantic clustering in EMPO, we conducted comparative analyses of EMPO and GRPO in terms of total training duration and GPU memory utilization. The results of mathematical reasoning and natural reasoning are shown in Table 6, respectively. It is worthy to note that the 14B model experiments require slightly less computational time than the 7B model. This is because, in our 14B experiments, we reduced the batch size and maximum response length from 2 and 1024 to 1 and 768, respectively, compared to the 3B and 7B configurations. This adjustment was made to fit the limited GPU memory of one single $8 \times \mathrm{A}100$ 80G machine. + +Table 5: Comparison of total runtime (measured as $8 \times$ A100 GPU hours) and storage cost (measured by max total GPU memory (GiB) utilization) between GRPO and EMP0. The GPU Memory semantic cluster process requires minimal computation and storage. + +
Qwen2.5-1.5B-MathQwen2.5-7B-Math
GPU HoursGPU MemGPU HoursGPU Mem
GRPO11.2240.48.5501.3
EMPO11.7208.28.7532.7
+ +Table 6: Comparison of total runtime (measured as $8 \times$ A100 GPU hours) and storage cost (measured by total GPU memory (GiB) utilization) between GRPO and EMP0. The GPU Memory semantic cluster process requires minimal computation and storage. + +
Qwen2.5-3BQwen2.5-7BQwen2.5-14B
GPU HoursGPU MemGPU HoursGPU MemGPU HoursGPU Mem
GRPO9.5274.812.4508.611.0588.2
EMPO11.1286.914.6532.711.5541.1
+ +# G Details of Prompt Collection + +For mathematical reasoning, we directly use 20,000 prompts randomly selected from Numina-Math-CoT. For free-form natural reasoning tasks, we adopt the prompts from Natural Reasoning5 by filtering out the questions with over-long prompt, reference answer. Besides, we use the response length of Llama3.3-70B-Instruct as a difficulty estimation metric, and filter out overly difficult samples with response lengths exceeding 4096 tokens. The data collection python code is demonstrated as follow: + +Algorithm 4: Python code of data filtering in a huggingface-like style. +```python +from datasets import load_dataset +dataset = load_dataset("facebook/Natural-Reasoning") +filtered_dataset = dataset.filter( lambda x: ( + # no answer + len(x["reference_answer"])) > 0 + # over-long answer + and len(x["reference_answer"]} < 129 + # overly difficult questions + and len(x["llamaresponses"]} < 4096 + # over-long prompt + and len(x["question"]} < 512 + # proof-oriented + and ("prove" not in x["question"].lower()) + and ("proof" not in x["question"].lower()) +) +``` + +# H Additional Result about Pass@k + +We provide additional visualization pass@k results of models trained with EMP0. The results are shown as follow. As shown in Figure H, the Base model consistently catch up with RL trained models when k is large. + +# I The Influence of Clustering Quality on the Performance of EMPO + +In our mathematical reasoning experiments, semantic clustering is achieved solely through regular expression matching without introducing additional models. Due to the naturally structured response formats in mathematical tasks, regular expression could accurately determine answer equivalence, resulting in relatively high clustering quality. 
+ +However, in more general free-form natural reasoning tasks where model responses are free-form much more diverse (e.g., matrix, numbers, a few lines of sentences/codes...), the clustering quality can impact EMPO's effectiveness. For instance, in our more early practice, we tried DeBERTa (a bert-like model with 300M parameters trained by microsoft) for semantic clustering. Due to + +![](images/2f1ddd8ff1e96a17be6f46c9ff2bb458e712e643653068eb46e98c12091fa5e9.jpg) +Figure 5: Trend of pass@k accuracy on Math test-set. + +![](images/4db2f4e321e46330bce0ead90d1173456c617c799def72823b107ed1cd146436.jpg) +Figure 6: Trend of pass@k accuracy on OMNI test-set. + +the poor quality of semantic clustering, our EMPO straggled to scale up and suffered from frequent reward hacking. Subsequently, by leveraging the general-verifier released by Tiger-Lab (a fine-tuned Qwen2.5-1.5B-Math model) for clustering, we successfully generalized EMPO to more general free-form reasoning tasks. Noted that even though this small language model undergoes supervised finetuning, it serves within our fully unsupervised framework as a fixed utility function for semantic comparison, rather than serving as an external supervisor for task-specific feedback. There are several fundamental difference between cluster model and the reward model used in supervised RL: + +- The cluster model does not evaluate output correctness relative to input queries. It just provides pairwise comparisons between the model's own outputs. That is, it only provides binary answer about "whether these two answers are the same?" rather than "which answer is better?". +- The cluster model does not provide any guidance, such as gradient information or hints on how to refine the reasoning traces. +- Compared to reward model or human-verifier golden answers, it can be much easier to implement such a cluster model. For example, in mathematical reasoning tasks, only regular expressions are enough for clustering. 
In natural reasoning tasks, a finetuned Qwen2.5-1B model can provide high quality semantic cluster results. + +Essentially, this is related to the non-identifiability problem in statistical inference [44]. The issue of non-identifiability arises because multiple, distinct underlying states (potential "truths," or more accurately, different reasoning pathways or different clusters of incorrect answers) could produce the same pattern of relational signals (i.e., the same semantic clustering results). \ No newline at end of file diff --git a/data/2025/2504_05xxx/2504.05812/images/02778710b12d49b556848c17b7b983ca69c0dffbf0dd66e3238c493a9a9401fe.jpg b/data/2025/2504_05xxx/2504.05812/images/02778710b12d49b556848c17b7b983ca69c0dffbf0dd66e3238c493a9a9401fe.jpg new file mode 100644 index 0000000000000000000000000000000000000000..e57dd2ffa18e500e8ce3f7dfb4b6a120a977bd31 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05812/images/02778710b12d49b556848c17b7b983ca69c0dffbf0dd66e3238c493a9a9401fe.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:3cee984d93f6719518eb9e514c65730206128c383009bffcd24561e328cb1259 +size 18015 diff --git a/data/2025/2504_05xxx/2504.05812/images/075188f39a5ebdacf50005cb09d7e38cd4328467bddde5289c5e00402e56d320.jpg b/data/2025/2504_05xxx/2504.05812/images/075188f39a5ebdacf50005cb09d7e38cd4328467bddde5289c5e00402e56d320.jpg new file mode 100644 index 0000000000000000000000000000000000000000..1f46d9fa5a0e8eb378c87f601daf159fe7b55754 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05812/images/075188f39a5ebdacf50005cb09d7e38cd4328467bddde5289c5e00402e56d320.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6ba78a135f5239fc37107f7e0ee42a4d40a740f189e809db55ee5fe225cc57e7 +size 17075 diff --git a/data/2025/2504_05xxx/2504.05812/images/1099887c44ad2b598c5b47017a4cafdffed2a0d290e926bdf6596db1d87f0f65.jpg b/data/2025/2504_05xxx/2504.05812/images/1099887c44ad2b598c5b47017a4cafdffed2a0d290e926bdf6596db1d87f0f65.jpg 
new file mode 100644 index 0000000000000000000000000000000000000000..941d2ec029e5c87fa9caddc61c5f53f924ce9046 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05812/images/1099887c44ad2b598c5b47017a4cafdffed2a0d290e926bdf6596db1d87f0f65.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:06b846c82596cd959543c3e1371e40ce43a595693bb4ec3cdb3e5678a88fb2e0 +size 29817 diff --git a/data/2025/2504_05xxx/2504.05812/images/15968eddd9d3cb8bf970e29441620a8eaeab409e9505401226d3f1b796f26eca.jpg b/data/2025/2504_05xxx/2504.05812/images/15968eddd9d3cb8bf970e29441620a8eaeab409e9505401226d3f1b796f26eca.jpg new file mode 100644 index 0000000000000000000000000000000000000000..27f8e6ae8ec182dcd40cd54758bb01c4062a1545 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05812/images/15968eddd9d3cb8bf970e29441620a8eaeab409e9505401226d3f1b796f26eca.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c86ab3ee520f517ef4d147a081e7a35035bff7e188ee57ae538ddbdc25ec9589 +size 7656 diff --git a/data/2025/2504_05xxx/2504.05812/images/2dc1f5f6db584b1ddf1af8ee3beee09f4ec9ced9abff64f85399d729b7d2d33e.jpg b/data/2025/2504_05xxx/2504.05812/images/2dc1f5f6db584b1ddf1af8ee3beee09f4ec9ced9abff64f85399d729b7d2d33e.jpg new file mode 100644 index 0000000000000000000000000000000000000000..4fded54532dfc1a7c571c446546fd7d4b5cb97d4 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05812/images/2dc1f5f6db584b1ddf1af8ee3beee09f4ec9ced9abff64f85399d729b7d2d33e.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:77426d59a989bfc4868c99df5eb02bf4b55dbeb2ce065fa9fd210f1b590dd589 +size 3460 diff --git a/data/2025/2504_05xxx/2504.05812/images/2f1ddd8ff1e96a17be6f46c9ff2bb458e712e643653068eb46e98c12091fa5e9.jpg b/data/2025/2504_05xxx/2504.05812/images/2f1ddd8ff1e96a17be6f46c9ff2bb458e712e643653068eb46e98c12091fa5e9.jpg new file mode 100644 index 0000000000000000000000000000000000000000..1de9860419245c7d08e1cb7abaa64044a80c79aa --- /dev/null +++ 
b/data/2025/2504_05xxx/2504.05812/images/2f1ddd8ff1e96a17be6f46c9ff2bb458e712e643653068eb46e98c12091fa5e9.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:3ed0e6a5ab7a89c768f2751ab131646dc34fb21a93e74ba9be27cbe593047baa +size 29228 diff --git a/data/2025/2504_05xxx/2504.05812/images/2f2bfbad48ec71e771da0d9c1a85dcef5bdda49c28e044cbd20eadabff19c212.jpg b/data/2025/2504_05xxx/2504.05812/images/2f2bfbad48ec71e771da0d9c1a85dcef5bdda49c28e044cbd20eadabff19c212.jpg new file mode 100644 index 0000000000000000000000000000000000000000..0ce13032a26c20c6cbf4d878db55835a8dbbbaea --- /dev/null +++ b/data/2025/2504_05xxx/2504.05812/images/2f2bfbad48ec71e771da0d9c1a85dcef5bdda49c28e044cbd20eadabff19c212.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:66ed044f42fbeed501c978acb0c2f415608d252794469e36d46c11b846e12bc7 +size 21482 diff --git a/data/2025/2504_05xxx/2504.05812/images/335efb518b469c6cc68204d57510d455982bcad4c3c5a141a0b1137dc465080b.jpg b/data/2025/2504_05xxx/2504.05812/images/335efb518b469c6cc68204d57510d455982bcad4c3c5a141a0b1137dc465080b.jpg new file mode 100644 index 0000000000000000000000000000000000000000..42b0e130d4d0ea101bcaf6b8c15ebde159e5bf6f --- /dev/null +++ b/data/2025/2504_05xxx/2504.05812/images/335efb518b469c6cc68204d57510d455982bcad4c3c5a141a0b1137dc465080b.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5431fc815d4653f9ee214cba509e02ada3ff58b86d74db17bed7a855b56bd8a5 +size 33445 diff --git a/data/2025/2504_05xxx/2504.05812/images/4ab88250ac1162a0d807127085f70365256f0f1d4fbca1fd94a6cc176b53a2d6.jpg b/data/2025/2504_05xxx/2504.05812/images/4ab88250ac1162a0d807127085f70365256f0f1d4fbca1fd94a6cc176b53a2d6.jpg new file mode 100644 index 0000000000000000000000000000000000000000..0e567b5c9640592cacfda6d22703c410a10704dd --- /dev/null +++ b/data/2025/2504_05xxx/2504.05812/images/4ab88250ac1162a0d807127085f70365256f0f1d4fbca1fd94a6cc176b53a2d6.jpg @@ -0,0 +1,3 @@ +version 
https://git-lfs.github.com/spec/v1 +oid sha256:6d33196cbf26d19adfde173ecdcffdccaace7b9dfccac8e3efe51a3f02c143a1 +size 5941 diff --git a/data/2025/2504_05xxx/2504.05812/images/4db2f4e321e46330bce0ead90d1173456c617c799def72823b107ed1cd146436.jpg b/data/2025/2504_05xxx/2504.05812/images/4db2f4e321e46330bce0ead90d1173456c617c799def72823b107ed1cd146436.jpg new file mode 100644 index 0000000000000000000000000000000000000000..25c81c8b656ed6316c54a7623363306fbbba97c6 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05812/images/4db2f4e321e46330bce0ead90d1173456c617c799def72823b107ed1cd146436.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:994d4f7f56de2002d9b34f3be56323562bf510744bfc1a386b627927cb3e0f40 +size 31625 diff --git a/data/2025/2504_05xxx/2504.05812/images/4ef9f197d8e943cb755d6ac18d94d533d69a071ce7d5d85c4d3749ba340c60ae.jpg b/data/2025/2504_05xxx/2504.05812/images/4ef9f197d8e943cb755d6ac18d94d533d69a071ce7d5d85c4d3749ba340c60ae.jpg new file mode 100644 index 0000000000000000000000000000000000000000..a6b25264fdf7eb35fee1916de0aa8807b6bc7fa7 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05812/images/4ef9f197d8e943cb755d6ac18d94d533d69a071ce7d5d85c4d3749ba340c60ae.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e7bc0ace4709718e14fea493dd79afe5d453c9b08d995ed71d3bdf6eb92ed7e0 +size 142337 diff --git a/data/2025/2504_05xxx/2504.05812/images/500ae90fc55e78382e716a769af6e3fc959b41fea3452ae5163b8e88a0534184.jpg b/data/2025/2504_05xxx/2504.05812/images/500ae90fc55e78382e716a769af6e3fc959b41fea3452ae5163b8e88a0534184.jpg new file mode 100644 index 0000000000000000000000000000000000000000..e0c51f0cf8a7ede81ed4344ec8ca8cd863052371 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05812/images/500ae90fc55e78382e716a769af6e3fc959b41fea3452ae5163b8e88a0534184.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:25b343cd13b5bddd1582535ebda9e3b28ef80d39b64e755e284d2fd7872aaef0 +size 33725 diff --git 
a/data/2025/2504_05xxx/2504.05812/images/54ea7ba11ac679ed9b70c45139b15bb0c494d20f9cfe7b9ba7e519498e1d1d00.jpg b/data/2025/2504_05xxx/2504.05812/images/54ea7ba11ac679ed9b70c45139b15bb0c494d20f9cfe7b9ba7e519498e1d1d00.jpg new file mode 100644 index 0000000000000000000000000000000000000000..6604a070bc05791037b50747dac7e86135d86c0c --- /dev/null +++ b/data/2025/2504_05xxx/2504.05812/images/54ea7ba11ac679ed9b70c45139b15bb0c494d20f9cfe7b9ba7e519498e1d1d00.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c568147b9f34b45936e2f7d6578d9cbe5e8f021c7a4f4b92cdbf6308252d9eea +size 15625 diff --git a/data/2025/2504_05xxx/2504.05812/images/60a4a919becdc853bd38aaa5ce700b90b4c33d2b4e994f1e89f907e7218a2031.jpg b/data/2025/2504_05xxx/2504.05812/images/60a4a919becdc853bd38aaa5ce700b90b4c33d2b4e994f1e89f907e7218a2031.jpg new file mode 100644 index 0000000000000000000000000000000000000000..dcd02c11cb94cd1a1617ec8d13b6ff1d3f77e8fd --- /dev/null +++ b/data/2025/2504_05xxx/2504.05812/images/60a4a919becdc853bd38aaa5ce700b90b4c33d2b4e994f1e89f907e7218a2031.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8135f2d2f90140a3da0efc9da6c8b38431c3ebcec01c3332d990ccee3d462549 +size 79657 diff --git a/data/2025/2504_05xxx/2504.05812/images/60bb097db42d16f1849c828d22d75124301e0a1142c86a96f7d7e2b0c7a98f8f.jpg b/data/2025/2504_05xxx/2504.05812/images/60bb097db42d16f1849c828d22d75124301e0a1142c86a96f7d7e2b0c7a98f8f.jpg new file mode 100644 index 0000000000000000000000000000000000000000..3e7a766d1cb09a201a1c2cc2dd57919ff6fc83fb --- /dev/null +++ b/data/2025/2504_05xxx/2504.05812/images/60bb097db42d16f1849c828d22d75124301e0a1142c86a96f7d7e2b0c7a98f8f.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9620eac7aaf59377177350d0ac16150a7410c94faa0543a504b62da4725d3354 +size 43384 diff --git a/data/2025/2504_05xxx/2504.05812/images/7ef4529cd34fc2a7296699d3537746185e5b5d29e53f96418a1d55e307d2a293.jpg 
b/data/2025/2504_05xxx/2504.05812/images/7ef4529cd34fc2a7296699d3537746185e5b5d29e53f96418a1d55e307d2a293.jpg new file mode 100644 index 0000000000000000000000000000000000000000..666f00d8f991d0b0e144e04ba55e501040188c57 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05812/images/7ef4529cd34fc2a7296699d3537746185e5b5d29e53f96418a1d55e307d2a293.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f8a1d17332ec5da799f7b83384a3316bb533fbb501b677e542348f7241edd70c +size 4593 diff --git a/data/2025/2504_05xxx/2504.05812/images/8edeaff50727c23da569f7a51a0f7793462c0fe8ed43158b6657fadb58d197a8.jpg b/data/2025/2504_05xxx/2504.05812/images/8edeaff50727c23da569f7a51a0f7793462c0fe8ed43158b6657fadb58d197a8.jpg new file mode 100644 index 0000000000000000000000000000000000000000..34c7e8ab5f153bc65be63cfb44a3426397298a6d --- /dev/null +++ b/data/2025/2504_05xxx/2504.05812/images/8edeaff50727c23da569f7a51a0f7793462c0fe8ed43158b6657fadb58d197a8.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b8a0364e7cd882af26dfbb2804f184d291fe7fe5ee3f90d97d91c9fae036de1f +size 12686 diff --git a/data/2025/2504_05xxx/2504.05812/images/9954ebe5c2ce338a3ab700264b280f56e5763e1ff349dfed0fa4106fd524e1d7.jpg b/data/2025/2504_05xxx/2504.05812/images/9954ebe5c2ce338a3ab700264b280f56e5763e1ff349dfed0fa4106fd524e1d7.jpg new file mode 100644 index 0000000000000000000000000000000000000000..1ab4631742110a795001bc69bffdbf9f10a75452 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05812/images/9954ebe5c2ce338a3ab700264b280f56e5763e1ff349dfed0fa4106fd524e1d7.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:35bcfdcb7ef42aa3cc211847344284de84b55f4d7e2f19467fe958616ec73f9c +size 32248 diff --git a/data/2025/2504_05xxx/2504.05812/images/a9c4e1f59141457527b28b6651a96305b912721bea20a8fb0468b3b8a72138f4.jpg b/data/2025/2504_05xxx/2504.05812/images/a9c4e1f59141457527b28b6651a96305b912721bea20a8fb0468b3b8a72138f4.jpg new file mode 100644 index 
0000000000000000000000000000000000000000..6f69305ec517d8a786cef826fd912891e546d0c7 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05812/images/a9c4e1f59141457527b28b6651a96305b912721bea20a8fb0468b3b8a72138f4.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c00f906dd9d322cabfe2bca544d7b94d886fde001e0083aa5a520bf841dd41e2 +size 6493 diff --git a/data/2025/2504_05xxx/2504.05812/images/aa06a11c8f9c83fa8d330c1d96e3028040afc34a993a9be0d8c03a9e8b3bbd9e.jpg b/data/2025/2504_05xxx/2504.05812/images/aa06a11c8f9c83fa8d330c1d96e3028040afc34a993a9be0d8c03a9e8b3bbd9e.jpg new file mode 100644 index 0000000000000000000000000000000000000000..a309556c96be7a3ecd4794505ffeae152b3ff0a0 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05812/images/aa06a11c8f9c83fa8d330c1d96e3028040afc34a993a9be0d8c03a9e8b3bbd9e.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:0f651d1245f5374b7b445190d090e3a2d80cc188eebfb91ca06022996bd55458 +size 2784 diff --git a/data/2025/2504_05xxx/2504.05812/images/bcab8cb78366e87f8dd8e4a498f6c241b7dd8324c9b304d720703a5b37b80b08.jpg b/data/2025/2504_05xxx/2504.05812/images/bcab8cb78366e87f8dd8e4a498f6c241b7dd8324c9b304d720703a5b37b80b08.jpg new file mode 100644 index 0000000000000000000000000000000000000000..ce8b8f02c8a1c4a6aed7ca95d0fd71b676761296 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05812/images/bcab8cb78366e87f8dd8e4a498f6c241b7dd8324c9b304d720703a5b37b80b08.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c94bf55a165cbf9f83a516f77720c31169f5b0c5bad8090fe9876b1ea899ca1b +size 24473 diff --git a/data/2025/2504_05xxx/2504.05812/images/ec259decfe03e3ddd26b93a2e485abc49110d57bd28f7e43ad303e1f7e55c511.jpg b/data/2025/2504_05xxx/2504.05812/images/ec259decfe03e3ddd26b93a2e485abc49110d57bd28f7e43ad303e1f7e55c511.jpg new file mode 100644 index 0000000000000000000000000000000000000000..1f5c022a113f7b255f041effee9333293f0cbe01 --- /dev/null +++ 
b/data/2025/2504_05xxx/2504.05812/images/ec259decfe03e3ddd26b93a2e485abc49110d57bd28f7e43ad303e1f7e55c511.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ad89728cbda34df2923b8c3735999b7261414aea13002e4ebc98dffd208f412b +size 13199 diff --git a/data/2025/2504_05xxx/2504.05812/images/f37dc763d49cd9c8b442af61394897d5e9ce45dc8a0826fb26764cd9be5a1a7a.jpg b/data/2025/2504_05xxx/2504.05812/images/f37dc763d49cd9c8b442af61394897d5e9ce45dc8a0826fb26764cd9be5a1a7a.jpg new file mode 100644 index 0000000000000000000000000000000000000000..e21b33f038d52c84640c7e559a0b93381434605f --- /dev/null +++ b/data/2025/2504_05xxx/2504.05812/images/f37dc763d49cd9c8b442af61394897d5e9ce45dc8a0826fb26764cd9be5a1a7a.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1bae16c26f73dcd9c2e81a8ea9524b8bd1c21facbf0d18fa474cee789f5c00bb +size 4386 diff --git a/data/2025/2504_05xxx/2504.05812/images/f93c2ee71c0adf195cf26c5902acef4260f08f79adba738f3dcae5c452ddfc31.jpg b/data/2025/2504_05xxx/2504.05812/images/f93c2ee71c0adf195cf26c5902acef4260f08f79adba738f3dcae5c452ddfc31.jpg new file mode 100644 index 0000000000000000000000000000000000000000..654bc45d03226f39ee653006898f6e32a3303699 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05812/images/f93c2ee71c0adf195cf26c5902acef4260f08f79adba738f3dcae5c452ddfc31.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8f9fc25a09118d2500c9cd20b89d4e2f84273bb016fd54f40f17e64c3442c6ca +size 16314 diff --git a/data/2025/2504_05xxx/2504.05812/images/fc80a9084de5033e815e2520f330fdf2683f45aaa0f8388c48366e9e8069a1d9.jpg b/data/2025/2504_05xxx/2504.05812/images/fc80a9084de5033e815e2520f330fdf2683f45aaa0f8388c48366e9e8069a1d9.jpg new file mode 100644 index 0000000000000000000000000000000000000000..2b5359dd762bdb0ce46f6bf836365a08f48cffe8 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05812/images/fc80a9084de5033e815e2520f330fdf2683f45aaa0f8388c48366e9e8069a1d9.jpg @@ -0,0 +1,3 @@ +version 
https://git-lfs.github.com/spec/v1 +oid sha256:e5ef78278a00985ab782084566555c43f1d27d264f4938c09560f41d175670ef +size 114708 diff --git a/data/2025/2504_05xxx/2504.05812/layout.json b/data/2025/2504_05xxx/2504.05812/layout.json new file mode 100644 index 0000000000000000000000000000000000000000..dc5254637c95019f54ec9ae528aec2aa79ac9fc2 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05812/layout.json @@ -0,0 +1,12383 @@ +{ + "pdf_info": [ + { + "para_blocks": [ + { + "bbox": [ + 113, + 97, + 497, + 137 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 97, + 497, + 137 + ], + "spans": [ + { + "bbox": [ + 113, + 97, + 497, + 137 + ], + "type": "text", + "content": "Right Question is Already Half the Answer: Fully Unsupervised LLM Reasoning Incentivization" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 147, + 178, + 223, + 190 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 147, + 178, + 223, + 190 + ], + "spans": [ + { + "bbox": [ + 147, + 178, + 223, + 190 + ], + "type": "text", + "content": "Qingyang Zhang" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 147, + 191, + 222, + 202 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 147, + 191, + 222, + 202 + ], + "spans": [ + { + "bbox": [ + 147, + 191, + 222, + 202 + ], + "type": "text", + "content": "Tianjin University" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 280, + 179, + 328, + 190 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 280, + 179, + 328, + 190 + ], + "spans": [ + { + "bbox": [ + 280, + 179, + 328, + 190 + ], + "type": "text", + "content": "Haitao Wu" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 267, + 191, + 340, + 202 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 267, + 191, + 340, + 202 + ], + "spans": [ + { + "bbox": [ + 267, + 191, + 340, + 202 + ], + "type": "text", + "content": "Tianjin University" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 385, + 179, + 
464, + 191 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 385, + 179, + 464, + 191 + ], + "spans": [ + { + "bbox": [ + 385, + 179, + 464, + 191 + ], + "type": "text", + "content": "Changqing Zhang" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 388, + 191, + 462, + 202 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 388, + 191, + 462, + 202 + ], + "spans": [ + { + "bbox": [ + 388, + 191, + 462, + 202 + ], + "type": "text", + "content": "Tianjin University" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 190, + 219, + 241, + 228 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 190, + 219, + 241, + 228 + ], + "spans": [ + { + "bbox": [ + 190, + 219, + 241, + 228 + ], + "type": "text", + "content": "Peilin Zhao" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 184, + 230, + 247, + 239 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 184, + 230, + 247, + 239 + ], + "spans": [ + { + "bbox": [ + 184, + 230, + 247, + 239 + ], + "type": "text", + "content": "Tencent AI Lab" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 354, + 219, + 402, + 228 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 354, + 219, + 402, + 228 + ], + "spans": [ + { + "bbox": [ + 354, + 219, + 402, + 228 + ], + "type": "text", + "content": "Yatao Bian" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 330, + 230, + 426, + 239 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 330, + 230, + 426, + 239 + ], + "spans": [ + { + "bbox": [ + 330, + 230, + 426, + 239 + ], + "type": "text", + "content": "Tencent AI Lab & NUS" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 281, + 269, + 329, + 282 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 281, + 269, + 329, + 282 + ], + "spans": [ + { + "bbox": [ + 281, + 269, + 329, + 282 + ], + "type": "text", + "content": "Abstract" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 140, + 293, + 
470, + 447 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 140, + 293, + 470, + 447 + ], + "spans": [ + { + "bbox": [ + 140, + 293, + 470, + 447 + ], + "type": "text", + "content": "Existing methods to enhance the reasoning capability of large language models predominantly rely on supervised fine-tuning (SFT) followed by reinforcement learning (RL) on reasoning-specific data. These approaches critically depend on external supervisions—such as labeled reasoning traces, verified golden answers, or pre-trained reward models. In this work, we propose Entropy Minimized Policy Optimization (EMPO), which makes an early attempt at fully unsupervised LLM reasoning incentivization. By continuously minimizing the predictive entropy of LLMs on unlabeled questions in a latent semantic space, EMP0 achieves competitive performance compared to supervised counterparts on both mathematical and freeform natural reasoning tasks. Specifically, without any supervised signals, EMP0 boosts the accuracy of Qwen2.5-Math-7B Base from " + }, + { + "bbox": [ + 140, + 293, + 470, + 447 + ], + "type": "inline_equation", + "content": "30.7\\%" + }, + { + "bbox": [ + 140, + 293, + 470, + 447 + ], + "type": "text", + "content": " to " + }, + { + "bbox": [ + 140, + 293, + 470, + 447 + ], + "type": "inline_equation", + "content": "48.1\\%" + }, + { + "bbox": [ + 140, + 293, + 470, + 447 + ], + "type": "text", + "content": " on mathematical benchmarks and improves the accuracy of Qwen2.5-7B Base from " + }, + { + "bbox": [ + 140, + 293, + 470, + 447 + ], + "type": "inline_equation", + "content": "32.1\\%" + }, + { + "bbox": [ + 140, + 293, + 470, + 447 + ], + "type": "text", + "content": " to " + }, + { + "bbox": [ + 140, + 293, + 470, + 447 + ], + "type": "inline_equation", + "content": "50.1\\%" + }, + { + "bbox": [ + 140, + 293, + 470, + 447 + ], + "type": "text", + "content": " on MMLU-Pro. 
Primary experiments and analysis are also provided to interpret the effectiveness of EMP0. Code is available at this url." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 105, + 466, + 192, + 478 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 466, + 192, + 478 + ], + "spans": [ + { + "bbox": [ + 105, + 466, + 192, + 478 + ], + "type": "text", + "content": "1 Introduction" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 104, + 490, + 306, + 632 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 490, + 306, + 632 + ], + "spans": [ + { + "bbox": [ + 104, + 490, + 306, + 632 + ], + "type": "text", + "content": "Large language models (LLMs) have demonstrated exceptional potential in challenging tasks such as mathematical reasoning [1, 2, 3] and code generation [4]. A prevailing paradigm for training reasoning LLMs involves firstly performing supervised fine-tuning (SFT) and then reinforcement learning (RL), or iterative combinations of both, applied to reasoning-specific datasets after pretraining [5]. Unfortunately, these methods typically depend on large-scale reasoning datasets with various forms of supervised information, such as human-labeled reasoning traces, verified golden answers, or an additional pre-trained re" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 104, + 632, + 504, + 666 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 632, + 504, + 666 + ], + "spans": [ + { + "bbox": [ + 104, + 632, + 504, + 666 + ], + "type": "text", + "content": "ward model. As a consequence, endowing LLMs with powerful reasoning capability through human experts is becoming increasingly time-consuming and costly, which greatly limits the scalability and broader adoption of reasoning models." 
+ } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 104, + 670, + 506, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 670, + 506, + 715 + ], + "spans": [ + { + "bbox": [ + 104, + 670, + 506, + 715 + ], + "type": "text", + "content": "To mitigate this, previous work employs self-consistency to construct pseudo data and deploy supervised finetuning for better performance [6]. However, the performance improvement is limited and under risks of model collapse [7]. Recent advancements, such as the pioneering work PFPO [8], frame the labeling of solutions as evaluation against test cases and then leverage self-consistency" + } + ] + } + ], + "index": 17 + }, + { + "type": "image", + "bbox": [ + 322, + 506, + 491, + 598 + ], + "blocks": [ + { + "bbox": [ + 322, + 506, + 491, + 598 + ], + "lines": [ + { + "bbox": [ + 322, + 506, + 491, + 598 + ], + "spans": [ + { + "bbox": [ + 322, + 506, + 491, + 598 + ], + "type": "image", + "image_path": "02778710b12d49b556848c17b7b983ca69c0dffbf0dd66e3238c493a9a9401fe.jpg" + } + ] + } + ], + "index": 18, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 311, + 602, + 504, + 624 + ], + "lines": [ + { + "bbox": [ + 311, + 602, + 504, + 624 + ], + "spans": [ + { + "bbox": [ + 311, + 602, + 504, + 624 + ], + "type": "text", + "content": "Figure 1: Improvement of the proposed method on Qwen2.5-7B and Qwen2.5-7B-Math model." 
+ } + ] + } + ], + "index": 19, + "angle": 0, + "type": "image_caption" + } + ], + "index": 18 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 14, + 202, + 37, + 560 + ], + "type": "aside_text", + "angle": 270, + "lines": [ + { + "bbox": [ + 14, + 202, + 37, + 560 + ], + "spans": [ + { + "bbox": [ + 14, + 202, + 37, + 560 + ], + "type": "text", + "content": "arXiv:2504.05812v3 [cs.LG] 18 May 2025" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 105, + 731, + 139, + 742 + ], + "type": "footer", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 731, + 139, + 742 + ], + "spans": [ + { + "bbox": [ + 105, + 731, + 139, + 742 + ], + "type": "text", + "content": "Preprint." + } + ] + } + ], + "index": 20 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 0 + }, + { + "para_blocks": [ + { + "bbox": [ + 104, + 72, + 504, + 150 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 72, + 504, + 150 + ], + "spans": [ + { + "bbox": [ + 104, + 72, + 504, + 150 + ], + "type": "text", + "content": "to generate pseudo test cases. Despite the promising results, the proposed method still necessitates supervision from instruction finetuning data and supervision signals from the frontier LLMs to initialize the RL process. Another more recent work [9] introduces a two-stage framework to construct self-rewarding reasoning models using self-generated data followed by RL. Despite the superior performance, the proposed method relies on a ground-truth verifier to obtain self-correction reasoning traces by rejection sampling. These approaches inspire our exploration of a critical open question: How can we incentivize LLM reasoning capacities in a fully unsupervised manner?" 
+ } + ] + } + ], + "index": 0 + }, + { + "type": "image", + "bbox": [ + 111, + 161, + 499, + 244 + ], + "blocks": [ + { + "bbox": [ + 111, + 161, + 499, + 244 + ], + "lines": [ + { + "bbox": [ + 111, + 161, + 499, + 244 + ], + "spans": [ + { + "bbox": [ + 111, + 161, + 499, + 244 + ], + "type": "image", + "image_path": "500ae90fc55e78382e716a769af6e3fc959b41fea3452ae5163b8e88a0534184.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 228, + 251, + 367, + 259 + ], + "lines": [ + { + "bbox": [ + 228, + 251, + 367, + 259 + ], + "spans": [ + { + "bbox": [ + 228, + 251, + 367, + 259 + ], + "type": "text", + "content": "(a) Comparison of different RL methods" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_caption" + } + ], + "index": 1 + }, + { + "type": "image", + "bbox": [ + 111, + 262, + 499, + 370 + ], + "blocks": [ + { + "bbox": [ + 111, + 262, + 499, + 370 + ], + "lines": [ + { + "bbox": [ + 111, + 262, + 499, + 370 + ], + "spans": [ + { + "bbox": [ + 111, + 262, + 499, + 370 + ], + "type": "image", + "image_path": "60bb097db42d16f1849c828d22d75124301e0a1142c86a96f7d7e2b0c7a98f8f.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 257, + 373, + 338, + 383 + ], + "lines": [ + { + "bbox": [ + 257, + 373, + 338, + 383 + ], + "spans": [ + { + "bbox": [ + 257, + 373, + 338, + 383 + ], + "type": "text", + "content": "(b) Overview of EMPO" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 104, + 397, + 504, + 475 + ], + "lines": [ + { + "bbox": [ + 104, + 397, + 504, + 475 + ], + "spans": [ + { + "bbox": [ + 104, + 397, + 504, + 475 + ], + "type": "text", + "content": "Figure 2: Overview of the proposed method. (a) Previous method like PPO [10] or GRPO [11] typically relies on external supervised signals, e.g., a pretrained reward model or golden answers. 
(b) The proposed Entropy Minimized Policy Optimization (EMPO) samples a set of responses from the current policy model, and then builds semantic clusters according to their equivalence. By continuously minimizing the entropy at a meaning level, our method achieves competitive benchmark performance without any external supervision, i.e., rule-based reward, pre-defined test cases or an pre-trained reward model." + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_caption" + } + ], + "index": 3 + }, + { + "bbox": [ + 104, + 491, + 506, + 657 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 491, + 506, + 657 + ], + "spans": [ + { + "bbox": [ + 104, + 491, + 506, + 657 + ], + "type": "text", + "content": "Recent advanced DeepSeek-R1-Zero [12] demonstrates robust reasoning capabilities without dependency on SFT data. By directly initiating RL from the base model, DeepSeek-R1-Zero autonomously evolves sophisticated reasoning behaviors such as reflection and self-critic by exploring the reward signals provided by rule-based rewards. i.e., verified golden answers or an additional pre-trained reward model. Inspired by the success of DeepSeek-R1-Zero, our motivation is to devise a fully unsupervised approach for powerful reasoning capability. Specifically, we propose a novel reinforcement learning algorithm termed as Entropy Minimized Policy Optimization (EMP0), which incentivizes the reasoning capability of LLMs in a fully unsupervised manner by minimizing their predictive entropy in a latent semantic space. This method optimizes the model to favor reasoning traces yielding consistent answers, enhancing output reliability. The semantic entropy objective we propose to minimize is a well-established measurement of LLMs' uncertainty, which extends beyond mathematical reasoning to free-form question-answering tasks. We further introduce entropy thresholding to filter unreliable reasoning traces, stabilizing the unsupervised training process. 
Experiments on various tasks including mathematical reasoning and free-form natural reasoning are conducted to validate the proposed method. Our contributions are summarized as follows:" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 132, + 664, + 504, + 723 + ], + "type": "list", + "angle": 0, + "index": 9, + "blocks": [ + { + "bbox": [ + 132, + 664, + 504, + 696 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 664, + 504, + 696 + ], + "spans": [ + { + "bbox": [ + 132, + 664, + 504, + 696 + ], + "type": "text", + "content": "- We propose an effective and principled strategy called Entropy-Minimized Policy Optimization (EMPO) for incentivizing the reasoning capabilities of LLMs in a fully unsupervised manner." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 132, + 700, + 504, + 723 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 700, + 504, + 723 + ], + "spans": [ + { + "bbox": [ + 132, + 700, + 504, + 723 + ], + "type": "text", + "content": "- We establish semantic entropy as a potent intrinsic reward signal for guiding LLM reasoning. 
Our analysis confirms a strong negative correlation between semantic entropy and model" + } + ] + } + ], + "index": 8 + } + ], + "sub_type": "text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 302, + 741, + 308, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 741, + 308, + 750 + ], + "spans": [ + { + "bbox": [ + 302, + 741, + 308, + 750 + ], + "type": "text", + "content": "2" + } + ] + } + ], + "index": 10 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 1 + }, + { + "para_blocks": [ + { + "bbox": [ + 140, + 72, + 504, + 95 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 140, + 72, + 504, + 95 + ], + "spans": [ + { + "bbox": [ + 140, + 72, + 504, + 95 + ], + "type": "text", + "content": "accuracy, validating its efficacy as a robust, unsupervised optimization objective that drives models towards generating more consistent and reliable outputs." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 132, + 102, + 506, + 180 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 102, + 506, + 180 + ], + "spans": [ + { + "bbox": [ + 132, + 102, + 506, + 180 + ], + "type": "text", + "content": "- Experiments on both math reasoning tasks with deterministic golden answers and freeform natural reasoning tasks are conducted to validate the efficacy and versatility of EMP0. Additionally, we provide critical insights into EMP0's mechanism, demonstrating that its effectiveness stems from an enhanced ability to consistently select and prioritize strong, pre-existing reasoning pathways learned during pre-training, rather than instilling fundamentally new reasoning skills. This underscores EMP0's strength in efficiently eliciting and refining latent capabilities within base models." 
+ } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 105, + 201, + 198, + 213 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 201, + 198, + 213 + ], + "spans": [ + { + "bbox": [ + 105, + 201, + 198, + 213 + ], + "type": "text", + "content": "2 Related Work" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 104, + 228, + 506, + 392 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 228, + 506, + 392 + ], + "spans": [ + { + "bbox": [ + 104, + 228, + 506, + 392 + ], + "type": "text", + "content": "Self-Supervised and Semi-Supervised Reasoning. To address the dependency on labeled data, several self-supervised and unsupervised methods have emerged. Huang et al. [6] propose a self-improvement framework where LLMs generate high-confidence answers using Chain-of-Thought (CoT) prompting and self-consistency, subsequently fine-tuning on these pseudo-labels. However, the performance gains are often limited, and there is a risk of model collapse, as noted in [7]. Recently, Patel et al. [13] apply self-improvement to web navigation tasks in WebArena, fine-tuning on synthetic data generated by the model itself. Li et al. [14] enhance long-context reasoning via SeaLong, sampling multiple outputs and optimizing with Minimum Bayes Risk. These methods, while reducing reliance on external labels, still involve supervised fine-tuning steps, contrasting with EMPO's fully unsupervised RL approach. A concurrent work, i.e., test-time reinforcement learning (TTRL) [15] directly obtains pseudo label by major voting and then conducts RL on test prompts at inference time, whereas our EMPO strictly maintains the separation between training and testing phases for ensuring that the model remains unexposed to any test prompts during training. Furthermore, while TTRL is currently limited to mathematical tasks, our approach is applicable to more general free-form reasoning tasks." 
+ } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 104, + 409, + 506, + 574 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 409, + 506, + 574 + ], + "spans": [ + { + "bbox": [ + 104, + 409, + 506, + 574 + ], + "type": "text", + "content": "Self-Rewarding and RL-based Reasoning. RL has become a prominent technique for enhancing LLM reasoning, often leveraging external or self-generated rewards. Yuan et al. [16] propose using the LLM itself via LLM-as-a-Judge prompting to provide rewards during training, reducing reliance on human feedback. Similarly, Xiong et al. [9] propose a two-stage self-rewarding framework for mathematical reasoning, generating data and applying RL with a ground-truth verifier for self-correction, achieving superior performance but requiring supervised signals. Jiao et al. [8] frame solution labeling as evaluation against test cases, yet still rely on instruction fine-tuning and frontier LLM signals for RL initialization. Wen et al. [17] introduce Entropy-Regularized Token-Level Policy Optimization (ETPO), augmenting RL with an entropy bonus to promote exploration, differing from EMP0's entropy minimization focus. Guo et al. [12] with DeepSeek-R1 demonstrate robust reasoning via RL from a base model, using rule-based rewards. Xi et al. [18] present " + }, + { + "bbox": [ + 104, + 409, + 506, + 574 + ], + "type": "inline_equation", + "content": "\\mathbb{R}^3" + }, + { + "bbox": [ + 104, + 409, + 506, + 574 + ], + "type": "text", + "content": ", a reverse curriculum RL approach using outcome supervision to mimic process supervision benefits. Wang et al. [19] propose CREAM, which enforces consistency regularization between internal reward models during self-training. These methods highlight a spectrum of supervision levels, positioning EMP0 as unique in its fully unsupervised nature, leveraging semantic entropy as an internal reward." 
+ } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 104, + 590, + 506, + 721 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 590, + 506, + 721 + ], + "spans": [ + { + "bbox": [ + 104, + 590, + 506, + 721 + ], + "type": "text", + "content": "Entropy Minimization and Semantic Consistency. Entropy minimization is a well-established technique in semi-supervised and unsupervised learning, with roots in traditional machine learning. Grandvalet and Bengio [20] demonstrate that minimizing entropy on unlabeled data can improve classification accuracy by encouraging model confidence. Test-time adaptation methods like Tent [21] adapt models to new domains by minimizing entropy on test data, filling domain gaps without additional labels. More recent work, COME, [22] extends this principle to conservative entropy minimization for robust adaptation. These approaches highlight the potential of entropy minimization as an unsupervised objective, which EMP0 leverages for LLM reasoning by extending it to semantic entropy [23] in a latent space. Farquhar et al. [24] further validate semantic entropy's utility in detecting hallucinations, reinforcing its relevance. Kharitonov et al. [25] explore entropy minimization in emergent languages, finding it naturally aligns with successful communication, providing additional theoretical foundation for EMP0." 
+ } + ] + } + ], + "index": 5 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 302, + 741, + 309, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 741, + 309, + 750 + ], + "spans": [ + { + "bbox": [ + 302, + 741, + 309, + 750 + ], + "type": "text", + "content": "3" + } + ] + } + ], + "index": 6 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 2 + }, + { + "para_blocks": [ + { + "bbox": [ + 105, + 71, + 167, + 83 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 71, + 167, + 83 + ], + "spans": [ + { + "bbox": [ + 105, + 71, + 167, + 83 + ], + "type": "text", + "content": "3 Method" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 104, + 95, + 504, + 140 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 95, + 504, + 140 + ], + "spans": [ + { + "bbox": [ + 104, + 95, + 504, + 140 + ], + "type": "text", + "content": "We propose an RL-based method to minimize the entropy of LLM generations in a latent semantic space for incentivizing its reasoning capability. We term our method Entropy-Minimized Policy Optimization (EMPO), which is devised in a fully unsupervised manner without any forms of external supervised information." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 105, + 152, + 188, + 163 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 152, + 188, + 163 + ], + "spans": [ + { + "bbox": [ + 105, + 152, + 188, + 163 + ], + "type": "text", + "content": "3.1 Preliminaries" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 104, + 171, + 504, + 228 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 171, + 504, + 228 + ], + "spans": [ + { + "bbox": [ + 104, + 171, + 504, + 228 + ], + "type": "text", + "content": "Recent advancements in reinforcement learning have demonstrated remarkable breakthroughs in enhancing the reasoning capabilities of LLMs. 
Taking the representative RL technique Group Relative Policy Optimization (GRPO) [11] used by DeepSeek-R1-Zero [12] as an example. GRPO first samples a group of outputs " + }, + { + "bbox": [ + 104, + 171, + 504, + 228 + ], + "type": "inline_equation", + "content": "\\{o_1, \\dots, o_G\\}" + }, + { + "bbox": [ + 104, + 171, + 504, + 228 + ], + "type": "text", + "content": " from the policy model " + }, + { + "bbox": [ + 104, + 171, + 504, + 228 + ], + "type": "inline_equation", + "content": "\\pi_{\\theta}" + }, + { + "bbox": [ + 104, + 171, + 504, + 228 + ], + "type": "text", + "content": " and then optimizes it by maximizing the following objective:" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 159, + 232, + 505, + 281 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 159, + 232, + 505, + 281 + ], + "spans": [ + { + "bbox": [ + 159, + 232, + 505, + 281 + ], + "type": "interline_equation", + "content": "\\begin{array}{l} \\mathcal {J} _ {\\mathrm {G R P O}} = \\mathbb {E} _ {[ q \\sim P (Q), \\{o _ {i} \\} _ {i = 1} \\sim \\pi_ {\\theta (O | q)} ]} \\\\ \\left[ \\frac {1}{G} \\sum_ {i = 1} ^ {G} \\left(\\min \\left(A _ {i}, \\operatorname {c l i p} (1, 1 - \\epsilon , 1 + \\epsilon) A _ {i}\\right) - \\beta K L \\left(\\pi_ {\\theta} \\mid \\mid \\pi_ {r e f}\\right) \\right. 
\\right], \\tag {1} \\\\ \\end{array}", + "image_path": "ec259decfe03e3ddd26b93a2e485abc49110d57bd28f7e43ad303e1f7e55c511.jpg" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 104, + 284, + 506, + 334 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 284, + 506, + 334 + ], + "spans": [ + { + "bbox": [ + 104, + 284, + 506, + 334 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 104, + 284, + 506, + 334 + ], + "type": "inline_equation", + "content": "\\beta" + }, + { + "bbox": [ + 104, + 284, + 506, + 334 + ], + "type": "text", + "content": " is a hyper-parameter which avoids the policy model to diverge too far away from the reference model " + }, + { + "bbox": [ + 104, + 284, + 506, + 334 + ], + "type": "inline_equation", + "content": "\\pi_{ref}" + }, + { + "bbox": [ + 104, + 284, + 506, + 334 + ], + "type": "text", + "content": ". " + }, + { + "bbox": [ + 104, + 284, + 506, + 334 + ], + "type": "inline_equation", + "content": "\\epsilon" + }, + { + "bbox": [ + 104, + 284, + 506, + 334 + ], + "type": "text", + "content": " clips extreme advantages for stability. " + }, + { + "bbox": [ + 104, + 284, + 506, + 334 + ], + "type": "inline_equation", + "content": "G" + }, + { + "bbox": [ + 104, + 284, + 506, + 334 + ], + "type": "text", + "content": " is the number of samples in one group. " + }, + { + "bbox": [ + 104, + 284, + 506, + 334 + ], + "type": "inline_equation", + "content": "A_{i}" + }, + { + "bbox": [ + 104, + 284, + 506, + 334 + ], + "type": "text", + "content": " is the advantage computed by normalizing the rewards within each group as " + }, + { + "bbox": [ + 104, + 284, + 506, + 334 + ], + "type": "inline_equation", + "content": "A_{i} = \\frac{r_{i} - mean(\\{r_{1},\\cdots,r_{G}\\})}{std(r_{1},\\cdots,r_{G})}" + }, + { + "bbox": [ + 104, + 284, + 506, + 334 + ], + "type": "text", + "content": ". 
In math reasoning task, the reward can be computed by predefined rules:" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 229, + 338, + 505, + 365 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 229, + 338, + 505, + 365 + ], + "spans": [ + { + "bbox": [ + 229, + 338, + 505, + 365 + ], + "type": "interline_equation", + "content": "r _ {i} = \\left\\{ \\begin{array}{l l} 1 & \\text {i f v e r i f i e r} (o _ {i}, a) = \\text {T r u e} \\\\ 0 & \\text {o t h e r w i s e} \\end{array} , \\right. \\tag {2}", + "image_path": "a9c4e1f59141457527b28b6651a96305b912721bea20a8fb0468b3b8a72138f4.jpg" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 104, + 369, + 506, + 381 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 369, + 506, + 381 + ], + "spans": [ + { + "bbox": [ + 104, + 369, + 506, + 381 + ], + "type": "text", + "content": "where a verifier is used to determine the correctness of " + }, + { + "bbox": [ + 104, + 369, + 506, + 381 + ], + "type": "inline_equation", + "content": "o_i" + }, + { + "bbox": [ + 104, + 369, + 506, + 381 + ], + "type": "text", + "content": " by comparing it with the golden answer " + }, + { + "bbox": [ + 104, + 369, + 506, + 381 + ], + "type": "inline_equation", + "content": "a" + }, + { + "bbox": [ + 104, + 369, + 506, + 381 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 104, + 385, + 504, + 451 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 385, + 504, + 451 + ], + "spans": [ + { + "bbox": [ + 104, + 385, + 504, + 451 + ], + "type": "text", + "content": "Unlike the above example, we consider fully unsupervised optimization settings where there are no golden answers to verify the correctness of model predictions. 
In this circumstance, we only have unlabeled reasoning problems " + }, + { + "bbox": [ + 104, + 385, + 504, + 451 + ], + "type": "inline_equation", + "content": "P(Q)" + }, + { + "bbox": [ + 104, + 385, + 504, + 451 + ], + "type": "text", + "content": ". Such problems were freely raised by users during the deployment of LLMs. Given a pre-training LLM " + }, + { + "bbox": [ + 104, + 385, + 504, + 451 + ], + "type": "inline_equation", + "content": "\\pi_{\\theta}" + }, + { + "bbox": [ + 104, + 385, + 504, + 451 + ], + "type": "text", + "content": " parameterized by " + }, + { + "bbox": [ + 104, + 385, + 504, + 451 + ], + "type": "inline_equation", + "content": "\\theta" + }, + { + "bbox": [ + 104, + 385, + 504, + 451 + ], + "type": "text", + "content": ", our goal is to enhance its reasoning ability by only utilizing the unlabeled user problems " + }, + { + "bbox": [ + 104, + 385, + 504, + 451 + ], + "type": "inline_equation", + "content": "\\{q_i\\}_{i=1}^n" + }, + { + "bbox": [ + 104, + 385, + 504, + 451 + ], + "type": "text", + "content": ", which requests minimized cost of data collection." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 105, + 464, + 311, + 475 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 464, + 311, + 475 + ], + "spans": [ + { + "bbox": [ + 105, + 464, + 311, + 475 + ], + "type": "text", + "content": "3.2 Semantic Entropy Minimization Objective" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 104, + 483, + 505, + 605 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 483, + 505, + 605 + ], + "spans": [ + { + "bbox": [ + 104, + 483, + 505, + 605 + ], + "type": "text", + "content": "Entropy is a classical unsupervised objective in the traditional semi-supervised and unsupervised learning fields [20, 26]. 
Previous works in computer vision show that by continuously minimizing the entropy on unlabeled samples after pre-training, the classification accuracy of machine learning models can be significantly improved to fill the domain gaps [21, 22]. The basic intuition behind entropy minimization is that a robust model should not only fit labeled data well but also make confident and consistent predictions on unlabeled data. This principle encourages the model to avoid ambiguity and make decisive predictions, thereby enhances generalization. In this work, we choose semantic entropy [23] as our unsupervised optimization objective, which is a natural extension of classical Shannon entropy specified for large language models. Intuitively speaking, minimizing semantic entropy encourages the LLMs' outputs to be more consistent in semantic level rather than format, and thus the final answers are expected to be more reliable." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 104, + 609, + 505, + 685 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 609, + 505, + 685 + ], + "spans": [ + { + "bbox": [ + 104, + 609, + 505, + 685 + ], + "type": "text", + "content": "Specifically, semantic entropy first samples a group of outputs " + }, + { + "bbox": [ + 104, + 609, + 505, + 685 + ], + "type": "inline_equation", + "content": "\\{o_1,\\dots ,o_G\\}" + }, + { + "bbox": [ + 104, + 609, + 505, + 685 + ], + "type": "text", + "content": " and then clusters the output sequences according to their meaning. That is, if two outputs share the same meaning (i.e., they are bidirectionally entailed), they should be merged into one same cluster in the semantic space. This can be done without notable computational cost by predefined rules such as N-gram, regular expressions or an additional small language model. 
Once built such a set of meaning clusters " + }, + { + "bbox": [ + 104, + 609, + 505, + 685 + ], + "type": "inline_equation", + "content": "\\{c\\}" + }, + { + "bbox": [ + 104, + 609, + 505, + 685 + ], + "type": "text", + "content": " in semantic space, we then approximate the probability over the meanings as the proportion of sampled answers as" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 266, + 685, + 504, + 699 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 266, + 685, + 504, + 699 + ], + "spans": [ + { + "bbox": [ + 266, + 685, + 504, + 699 + ], + "type": "interline_equation", + "content": "p \\left(c _ {j} \\mid x\\right) \\approx \\left| c _ {j} \\right| / G, \\tag {3}", + "image_path": "2dc1f5f6db584b1ddf1af8ee3beee09f4ec9ced9abff64f85399d729b7d2d33e.jpg" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 104, + 700, + 506, + 723 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 700, + 506, + 723 + ], + "spans": [ + { + "bbox": [ + 104, + 700, + 506, + 723 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 104, + 700, + 506, + 723 + ], + "type": "inline_equation", + "content": "c_{j} \\in \\{c\\}" + }, + { + "bbox": [ + 104, + 700, + 506, + 723 + ], + "type": "text", + "content": " is the " + }, + { + "bbox": [ + 104, + 700, + 506, + 723 + ], + "type": "inline_equation", + "content": "j" + }, + { + "bbox": [ + 104, + 700, + 506, + 723 + ], + "type": "text", + "content": "-th meaning cluster. " + }, + { + "bbox": [ + 104, + 700, + 506, + 723 + ], + "type": "inline_equation", + "content": "|c_{j}|" + }, + { + "bbox": [ + 104, + 700, + 506, + 723 + ], + "type": "text", + "content": " denotes the numbers of outputs that belong to " + }, + { + "bbox": [ + 104, + 700, + 506, + 723 + ], + "type": "inline_equation", + "content": "c_{j}" + }, + { + "bbox": [ + 104, + 700, + 506, + 723 + ], + "type": "text", + "content": ". 
Finally, given question " + }, + { + "bbox": [ + 104, + 700, + 506, + 723 + ], + "type": "inline_equation", + "content": "q" + }, + { + "bbox": [ + 104, + 700, + 506, + 723 + ], + "type": "text", + "content": ", the semantic entropy (denoted as " + }, + { + "bbox": [ + 104, + 700, + 506, + 723 + ], + "type": "inline_equation", + "content": "H" + }, + { + "bbox": [ + 104, + 700, + 506, + 723 + ], + "type": "text", + "content": ") over the model's output meanings" + } + ] + } + ], + "index": 13 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 302, + 741, + 309, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 741, + 309, + 750 + ], + "spans": [ + { + "bbox": [ + 302, + 741, + 309, + 750 + ], + "type": "text", + "content": "4" + } + ] + } + ], + "index": 14 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 3 + }, + { + "para_blocks": [ + { + "bbox": [ + 105, + 72, + 267, + 83 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 72, + 267, + 83 + ], + "spans": [ + { + "bbox": [ + 105, + 72, + 267, + 83 + ], + "type": "text", + "content": "distribution can be estimated as follows" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 236, + 85, + 505, + 112 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 236, + 85, + 505, + 112 + ], + "spans": [ + { + "bbox": [ + 236, + 85, + 505, + 112 + ], + "type": "interline_equation", + "content": "H = - \\sum_ {c _ {j} \\in \\{c \\}} p (c _ {j} | q) \\log p (c _ {j} | q). 
\\tag {4}", + "image_path": "4ab88250ac1162a0d807127085f70365256f0f1d4fbca1fd94a6cc176b53a2d6.jpg" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 104, + 115, + 506, + 160 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 115, + 506, + 160 + ], + "spans": [ + { + "bbox": [ + 104, + 115, + 506, + 160 + ], + "type": "text", + "content": "As proven by previous work, semantic entropy has a strong negative relationship with model accuracy, which can be used as an efficient measurement to detect unreliable LLM generations such as confabulation and hallucination [23, 24]. Motivated by this, we propose to leverage semantic entropy as an unsupervised optimization objective for incentivizing the reasoning capability of LLM." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 105, + 171, + 303, + 183 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 171, + 303, + 183 + ], + "spans": [ + { + "bbox": [ + 105, + 171, + 303, + 183 + ], + "type": "text", + "content": "3.3 Entropy-Minimized Policy Optimization" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 104, + 191, + 506, + 302 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 191, + 506, + 302 + ], + "spans": [ + { + "bbox": [ + 104, + 191, + 506, + 302 + ], + "type": "text", + "content": "We propose Entropy-Minimized Policy Optimization (EMPO), an RL-based method that optimizes the pre-trained large language model " + }, + { + "bbox": [ + 104, + 191, + 506, + 302 + ], + "type": "inline_equation", + "content": "\\pi_{\\theta}" + }, + { + "bbox": [ + 104, + 191, + 506, + 302 + ], + "type": "text", + "content": " to favor low semantic entropy responses given unlabeled user questions " + }, + { + "bbox": [ + 104, + 191, + 506, + 302 + ], + "type": "inline_equation", + "content": "\\{q_i\\}_{i=1}^n" + }, + { + "bbox": [ + 104, + 191, + 506, + 302 + ], + "type": "text", + "content": ". 
Given input questions, EMPO incentivizes the outputs that belong to higher probability meaning cluster, and thus minimizes the semantic entropy over the meaning distribution. Specifically, given a question " + }, + { + "bbox": [ + 104, + 191, + 506, + 302 + ], + "type": "inline_equation", + "content": "q" + }, + { + "bbox": [ + 104, + 191, + 506, + 302 + ], + "type": "text", + "content": ", our EMPO first samples a group of output " + }, + { + "bbox": [ + 104, + 191, + 506, + 302 + ], + "type": "inline_equation", + "content": "\\{o_1, \\ldots, o_G\\}" + }, + { + "bbox": [ + 104, + 191, + 506, + 302 + ], + "type": "text", + "content": " from the current model " + }, + { + "bbox": [ + 104, + 191, + 506, + 302 + ], + "type": "inline_equation", + "content": "\\pi_{\\theta}" + }, + { + "bbox": [ + 104, + 191, + 506, + 302 + ], + "type": "text", + "content": " and then merges them into a set of " + }, + { + "bbox": [ + 104, + 191, + 506, + 302 + ], + "type": "inline_equation", + "content": "M" + }, + { + "bbox": [ + 104, + 191, + 506, + 302 + ], + "type": "text", + "content": " meaning clusters " + }, + { + "bbox": [ + 104, + 191, + 506, + 302 + ], + "type": "inline_equation", + "content": "\\{c_1, \\ldots, c_M\\}" + }, + { + "bbox": [ + 104, + 191, + 506, + 302 + ], + "type": "text", + "content": ". As we mentioned before, this can be done without notable computational cost (please refer to the quantitative results in Appendix F) by predefined rules such as N-gram, regular expressions or an additional small language model (SLM)" + }, + { + "bbox": [ + 104, + 191, + 506, + 302 + ], + "type": "inline_equation", + "content": "^1" + }, + { + "bbox": [ + 104, + 191, + 506, + 302 + ], + "type": "text", + "content": ". 
Once built such a meaning set, EMPO approximately minimizes the semantic entropy " + }, + { + "bbox": [ + 104, + 191, + 506, + 302 + ], + "type": "inline_equation", + "content": "H" + }, + { + "bbox": [ + 104, + 191, + 506, + 302 + ], + "type": "text", + "content": " by maximizing the following objective" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 134, + 305, + 505, + 338 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 134, + 305, + 505, + 338 + ], + "spans": [ + { + "bbox": [ + 134, + 305, + 505, + 338 + ], + "type": "interline_equation", + "content": "\\mathcal {J} _ {\\mathrm {E M P O}} = \\mathbb {E} _ {[ \\{q \\} \\sim P (Q), \\{o _ {i} \\} _ {i = 1} ^ {G} \\sim \\pi_ {\\theta} (O | q) ]} \\frac {1}{| G |} \\sum_ {i = 1} ^ {| G |} \\left(A _ {i}\\right), A _ {i} = \\frac {r _ {i} - m e a n \\left(\\left\\{r _ {1} , \\cdots , r _ {G} \\right\\}\\right)}{\\operatorname {s t d} \\left(r _ {1} , \\cdots , r _ {G}\\right)} \\tag {5}", + "image_path": "8edeaff50727c23da569f7a51a0f7793462c0fe8ed43158b6657fadb58d197a8.jpg" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 104, + 341, + 506, + 384 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 341, + 506, + 384 + ], + "spans": [ + { + "bbox": [ + 104, + 341, + 506, + 384 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 104, + 341, + 506, + 384 + ], + "type": "inline_equation", + "content": "A_{i}" + }, + { + "bbox": [ + 104, + 341, + 506, + 384 + ], + "type": "text", + "content": " is the advantage of output " + }, + { + "bbox": [ + 104, + 341, + 506, + 384 + ], + "type": "inline_equation", + "content": "o_{i}" + }, + { + "bbox": [ + 104, + 341, + 506, + 384 + ], + "type": "text", + "content": " calculated by normalizing the rewards. 
Unlike GRPO in which the rewards is calculated depending on external supervision such as pre-defined rules or an reward model, in EMP0, the reward assigned for the " + }, + { + "bbox": [ + 104, + 341, + 506, + 384 + ], + "type": "inline_equation", + "content": "i" + }, + { + "bbox": [ + 104, + 341, + 506, + 384 + ], + "type": "text", + "content": "-th outputs " + }, + { + "bbox": [ + 104, + 341, + 506, + 384 + ], + "type": "inline_equation", + "content": "o_{i}" + }, + { + "bbox": [ + 104, + 341, + 506, + 384 + ], + "type": "text", + "content": " is the likelihood of its meaning cluster, i.e.," + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 239, + 385, + 504, + 398 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 239, + 385, + 504, + 398 + ], + "spans": [ + { + "bbox": [ + 239, + 385, + 504, + 398 + ], + "type": "interline_equation", + "content": "r _ {i} = p \\left(c _ {j} \\mid q\\right), \\text {w h e r e} l \\left(o _ {i}\\right) = c _ {j}, \\tag {6}", + "image_path": "7ef4529cd34fc2a7296699d3537746185e5b5d29e53f96418a1d55e307d2a293.jpg" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 104, + 399, + 506, + 422 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 399, + 506, + 422 + ], + "spans": [ + { + "bbox": [ + 104, + 399, + 506, + 422 + ], + "type": "text", + "content": "where the meaning likelihood " + }, + { + "bbox": [ + 104, + 399, + 506, + 422 + ], + "type": "inline_equation", + "content": "p(c_{j}|q)" + }, + { + "bbox": [ + 104, + 399, + 506, + 422 + ], + "type": "text", + "content": " is approximated by Eq. 3. Intuitively, the outputs convey higher-probability meanings are of higher advantages, and are therefore incentivized through training." 
+ } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 104, + 426, + 506, + 545 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 426, + 506, + 545 + ], + "spans": [ + { + "bbox": [ + 104, + 426, + 506, + 545 + ], + "type": "text", + "content": "How to Mitigate Potential Reward Hacking? Note that different from verifiable rule-based reward, which inherently resists reward hacking risks, optimizing unsupervised entropy objectives may permit trivial solutions. For instance, models could exploit the reward signal by overfitting to high-confident but wrong predictions for the most frequent semantic clusters without carefully reasoning process. To address this, we implement a straightforward entropy thresholding strategy, restricting optimization to prompts exhibiting moderate uncertainty via dual threshold criteria. Specifically, two entropy thresholds are deployed to filter out user queries " + }, + { + "bbox": [ + 104, + 426, + 506, + 545 + ], + "type": "inline_equation", + "content": "q" + }, + { + "bbox": [ + 104, + 426, + 506, + 545 + ], + "type": "text", + "content": " that result in overly high or low entropy unreliable answers. Extremely high entropy indicates that the model is highly uncertain, and thus its predictions are prone to be unreliable. In addition, continuously optimizing on responses with already low entropy is redundant and at the risk of overconfidence [27]. 
The final optimization objective of EMPO is" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 199, + 548, + 350, + 563 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 199, + 548, + 350, + 563 + ], + "spans": [ + { + "bbox": [ + 199, + 548, + 350, + 563 + ], + "type": "interline_equation", + "content": "\\mathcal {J} _ {\\mathtt {E M P O}} = \\mathbb {E} _ {[ \\{q \\} \\sim P (Q), \\{o _ {i} \\} _ {i = 1} ^ {G} \\sim \\pi_ {\\theta} (O | q) ]}", + "image_path": "f37dc763d49cd9c8b442af61394897d5e9ce45dc8a0826fb26764cd9be5a1a7a.jpg" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 225, + 564, + 505, + 602 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 225, + 564, + 505, + 602 + ], + "spans": [ + { + "bbox": [ + 225, + 564, + 505, + 602 + ], + "type": "interline_equation", + "content": "\\left[ \\frac {1}{| G |} \\sum_ {i = 1} ^ {| G |} \\left(\\min \\left(A _ {i}, \\operatorname {c l i p} (1, 1 - \\epsilon , 1 + \\epsilon) A _ {i}\\right) \\right. \\right], \\tag {7}", + "image_path": "15968eddd9d3cb8bf970e29441620a8eaeab409e9505401226d3f1b796f26eca.jpg" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 222, + 604, + 317, + 616 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 222, + 604, + 317, + 616 + ], + "spans": [ + { + "bbox": [ + 222, + 604, + 317, + 616 + ], + "type": "interline_equation", + "content": "\\mathrm {s . 
t .} \\delta_ {l o w} < H < \\delta_ {h i g h}", + "image_path": "aa06a11c8f9c83fa8d330c1d96e3028040afc34a993a9be0d8c03a9e8b3bbd9e.jpg" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 104, + 619, + 506, + 675 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 619, + 506, + 675 + ], + "spans": [ + { + "bbox": [ + 104, + 619, + 506, + 675 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 104, + 619, + 506, + 675 + ], + "type": "inline_equation", + "content": "H" + }, + { + "bbox": [ + 104, + 619, + 506, + 675 + ], + "type": "text", + "content": " is the semantic entropy defined in Eq. 4. The questions results in highly unreliable answers with entropy greater than " + }, + { + "bbox": [ + 104, + 619, + 506, + 675 + ], + "type": "inline_equation", + "content": "\\delta_{high}" + }, + { + "bbox": [ + 104, + 619, + 506, + 675 + ], + "type": "text", + "content": " are filtered out. Besides, we also filter out low-entropy answers to maintain the diversity of model outputs and further avoid potential reward hacking. Following previous work [28], we remove the KL constraint for better performance. " + }, + { + "bbox": [ + 104, + 619, + 506, + 675 + ], + "type": "inline_equation", + "content": "\\epsilon" + }, + { + "bbox": [ + 104, + 619, + 506, + 675 + ], + "type": "text", + "content": " clips extremely high or low advantages for stability similar to common practice." + } + ] + } + ], + "index": 13 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 104, + 681, + 506, + 723 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 681, + 506, + 723 + ], + "spans": [ + { + "bbox": [ + 104, + 681, + 506, + 723 + ], + "type": "text", + "content": "1Such a SLM does not provide explicit or direct supervision signals regarding the correctness or quality of reasoning for a given query. 
The \"unsupervised\" nature of EMP0 refers to its independence from labeled (query, correct-answer) pairs or (query, valid-reasoning-trajectory) pairs for learning the reasoning task itself. More discussions are in Appendix I." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 302, + 741, + 309, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 741, + 309, + 750 + ], + "spans": [ + { + "bbox": [ + 302, + 741, + 309, + 750 + ], + "type": "text", + "content": "5" + } + ] + } + ], + "index": 15 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 4 + }, + { + "para_blocks": [ + { + "bbox": [ + 105, + 71, + 192, + 85 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 71, + 192, + 85 + ], + "spans": [ + { + "bbox": [ + 105, + 71, + 192, + 85 + ], + "type": "text", + "content": "4 Experiments" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 105, + 96, + 226, + 109 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 96, + 226, + 109 + ], + "spans": [ + { + "bbox": [ + 105, + 96, + 226, + 109 + ], + "type": "text", + "content": "4.1 Experimental Settings" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 104, + 118, + 504, + 152 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 118, + 504, + 152 + ], + "spans": [ + { + "bbox": [ + 104, + 118, + 504, + 152 + ], + "type": "text", + "content": "We conduct experiments on multiple datasets including both closed-form math reasoning tasks and free-form natural reasoning tasks. Our EMP0 shows competitive performance by purely RL in a fully unsupervised manner compared to supervised finetuning and RL methods." 
+ } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 104, + 156, + 506, + 266 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 156, + 506, + 266 + ], + "spans": [ + { + "bbox": [ + 104, + 156, + 506, + 266 + ], + "type": "text", + "content": "Prompt Collection and Data Engineering. For mathematical reasoning, following the common practice [29, 8, 30], we adopt 20,000 prompts randomly selected from NuminaMath-CoT dataset [31] for training" + }, + { + "bbox": [ + 104, + 156, + 506, + 266 + ], + "type": "inline_equation", + "content": "^{2}" + }, + { + "bbox": [ + 104, + 156, + 506, + 266 + ], + "type": "text", + "content": " without additional data engineering. For free-form natural reasoning tasks, we adopt the prompts from Natural Reasoning" + }, + { + "bbox": [ + 104, + 156, + 506, + 266 + ], + "type": "inline_equation", + "content": "^{3}" + }, + { + "bbox": [ + 104, + 156, + 506, + 266 + ], + "type": "text", + "content": ", a large-scale dataset consisting of diverse reasoning questions from multiple domains (e.g., Physics, Computer Science, Economics, Social Sciences and more). For training efficiency, we filter out the questions with over-long prompt or reference answer. Besides, taking inspiration from [32, 33, 34], we use the response length of Llama3.3-70B-Instruct as a difficulty estimation metric, and filter out samples with response lengths exceeding 4096 tokens. The remaining samples are simpler for stabilizing the training process. The final training subset is consisted of 18,000 questions. More details can be found in Appendix G." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 104, + 270, + 506, + 360 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 270, + 506, + 360 + ], + "spans": [ + { + "bbox": [ + 104, + 270, + 506, + 360 + ], + "type": "text", + "content": "Evaluation. 
For mathematical reasoning, the performance is evaluated on a diverse suite of benchmarks including Minerva Math, MATH, AMC23, OlympaidBench and AIME24. The evaluation codebase is borrowed from the SimpleRL project [35], which is consistent with other concurrent works [30]. For free-form natural reasoning, we evaluate on MMLU-Pro [36] and GPQA [37] benchmarks, which consist of challenging reasoning-focused problems across various subjects, e.g., biology, business, chemistry, computer science and so on. We prompt the model to reason step by step and output the final answer within \"\\boxed{}\" and report the multi-choice accuracy. Without specific clarification, all evaluations are conducted using zero-shot prompting and greedy-decoding." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 104, + 363, + 506, + 485 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 363, + 506, + 485 + ], + "spans": [ + { + "bbox": [ + 104, + 363, + 506, + 485 + ], + "type": "text", + "content": "Model training. For mathematical reasoning tasks, we train Qwen2.5-Math-1.5B and 7B Base models with our EMP0. The baselines we consider include supervised finetuning (SFT), online direct preference optimization (ODPO) [30] and the representative GRPO. We also compared with Qwen2.5-Math Instruction models for a more comprehensive comparison, where the instruction model is trained by iteratively supervised finetuning and RL on private data. For free-form natural reasoning tasks, we initialize from Qwen2.5-3B, 7B and 14B Base models. Different from mathematical reasoning, it is difficult to adopt rule-based reward for free-form question-answering tasks without deterministic golden answers. We consider the corresponding Instruct model, the Base model with or without few-shot CoT prompt as baselines. Besides, we also compare with SFT where the Base model is tuned to fit the response of Llama3.3-70B-Instruct. 
For more results on other model families beyond the Qwen series (e.g., Llama3), please refer to the Appendix D." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 132, + 494, + 504, + 687 + ], + "type": "list", + "angle": 0, + "index": 10, + "blocks": [ + { + "bbox": [ + 132, + 494, + 504, + 515 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 494, + 504, + 515 + ], + "spans": [ + { + "bbox": [ + 132, + 494, + 504, + 515 + ], + "type": "text", + "content": "- SFT: We train models by supervised finetuning via Open-Instruct [38] with a fixed learning rate of " + }, + { + "bbox": [ + 132, + 494, + 504, + 515 + ], + "type": "inline_equation", + "content": "1 \\times 10^{-6}" + }, + { + "bbox": [ + 132, + 494, + 504, + 515 + ], + "type": "text", + "content": ", a global batch size of 128 and train for 1 epoch with a max length of 2048." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 132, + 522, + 504, + 597 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 522, + 504, + 597 + ], + "spans": [ + { + "bbox": [ + 132, + 522, + 504, + 597 + ], + "type": "text", + "content": "- GRPO: We implement GRPO viaTRL [39] based on Open-R1 [29]. We sample 7 and 12 responses for each prompt for mathematical and natural reasoning tasks respectively. We train the model for 3 epochs with a maximum generation length of 2048. Following [40], we only use the rule-based accuracy reward and do not adopt format-reward. The accuracy reward is implemented as follows: If the response contains the correct final answer within \"boxed{}\", it receives a reward of 1. If the model prediction is wrong, it receives a reward of 0. When there is no answer can be extracted from the model's response, the reward is " + }, + { + "bbox": [ + 132, + 522, + 504, + 597 + ], + "type": "inline_equation", + "content": "-0.5" + }, + { + "bbox": [ + 132, + 522, + 504, + 597 + ], + "type": "text", + "content": "." 
+ } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 132, + 604, + 503, + 648 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 604, + 503, + 648 + ], + "spans": [ + { + "bbox": [ + 132, + 604, + 503, + 648 + ], + "type": "text", + "content": "- Online-DPO: Recent advanced Online-DPO first samples a set of responses and then verifies and selects the responses with highest reward and lowest reward as a preference pair. We directly copy the results from [30], where the model is trained for 7 iterations. Each iteration involves 2 training epochs and 20K training samples, i.e., 140K training samples in total." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 132, + 654, + 503, + 687 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 654, + 503, + 687 + ], + "spans": [ + { + "bbox": [ + 132, + 654, + 503, + 687 + ], + "type": "text", + "content": "- EMP0: Most hyper-parameters of our method, e.g., number of generations, max generation length, batch size, learning rate are the same with GRPO. In mathematical reasoning tasks, we use a set of regular expressions to merge the outputs into meaning clusters. 
For" + } + ] + } + ], + "index": 9 + } + ], + "sub_type": "text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 116, + 700, + 350, + 711 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 116, + 700, + 350, + 711 + ], + "spans": [ + { + "bbox": [ + 116, + 700, + 350, + 711 + ], + "type": "inline_equation", + "content": "^{2}" + }, + { + "bbox": [ + 116, + 700, + 350, + 711 + ], + "type": "text", + "content": "https://huggingface.co/datasets/RLHFlow/numa_prompt_dpo1" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 119, + 712, + 337, + 721 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 119, + 712, + 337, + 721 + ], + "spans": [ + { + "bbox": [ + 119, + 712, + 337, + 721 + ], + "type": "text", + "content": "3https://huggingface.co/datasets/facebook/natural_reasoning" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 302, + 742, + 309, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 742, + 309, + 750 + ], + "spans": [ + { + "bbox": [ + 302, + 742, + 309, + 750 + ], + "type": "text", + "content": "6" + } + ] + } + ], + "index": 14 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 5 + }, + { + "para_blocks": [ + { + "bbox": [ + 140, + 72, + 506, + 139 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 140, + 72, + 506, + 139 + ], + "spans": [ + { + "bbox": [ + 140, + 72, + 506, + 139 + ], + "type": "text", + "content": "more general free-form natural reasoning, we leverage General-Verifier" + }, + { + "bbox": [ + 140, + 72, + 506, + 139 + ], + "type": "inline_equation", + "content": "^4" + }, + { + "bbox": [ + 140, + 72, + 506, + 139 + ], + "type": "text", + "content": " (a compact small language model with 1.5B parameters) to determine whether two outputs are of the same meaning or not following [23, 24]. A concrete example can be found in Appendix C. 
Specifically, if the final predictions (i.e., the contents within \"\\boxed{}\") of two model outputs are bidirectionally implicating, then we merge them into one semantic cluster ignoring their reasoning traces. More details are in Appendix E." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 105, + 152, + 187, + 163 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 152, + 187, + 163 + ], + "spans": [ + { + "bbox": [ + 105, + 152, + 187, + 163 + ], + "type": "text", + "content": "4.2 Main Results" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 105, + 171, + 344, + 185 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 171, + 344, + 185 + ], + "spans": [ + { + "bbox": [ + 105, + 171, + 344, + 185 + ], + "type": "text", + "content": "4.2.1 Performance on Mathematical Reasoning Tasks." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 104, + 190, + 506, + 279 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 190, + 506, + 279 + ], + "spans": [ + { + "bbox": [ + 104, + 190, + 506, + 279 + ], + "type": "text", + "content": "We conduct experiments on mathematical tasks to evaluate our method. The main results are shown in Table 1. EMP0 has successfully incentivized the Qwen2.5-Math Base model with reasoning capability without dependency on any external supervision. 
We observe a substantial improvement in the average performance on commonly used mathematical reasoning benchmarks from " + }, + { + "bbox": [ + 104, + 190, + 506, + 279 + ], + "type": "inline_equation", + "content": "28.1\\%" + }, + { + "bbox": [ + 104, + 190, + 506, + 279 + ], + "type": "text", + "content": " to " + }, + { + "bbox": [ + 104, + 190, + 506, + 279 + ], + "type": "inline_equation", + "content": "42.1\\%" + }, + { + "bbox": [ + 104, + 190, + 506, + 279 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 190, + 506, + 279 + ], + "type": "inline_equation", + "content": "30.7\\%" + }, + { + "bbox": [ + 104, + 190, + 506, + 279 + ], + "type": "text", + "content": " to " + }, + { + "bbox": [ + 104, + 190, + 506, + 279 + ], + "type": "inline_equation", + "content": "48.1\\%" + }, + { + "bbox": [ + 104, + 190, + 506, + 279 + ], + "type": "text", + "content": " on 1.5B and 7B models, respectively. Notably, through fully unsupervised RL training, the 1.5B and 7B model has both achieved competitive performance (42.1% and " + }, + { + "bbox": [ + 104, + 190, + 506, + 279 + ], + "type": "inline_equation", + "content": "48.1\\%" + }, + { + "bbox": [ + 104, + 190, + 506, + 279 + ], + "type": "text", + "content": ") near to Qwen2.5-Math-Instruct (40.5% and " + }, + { + "bbox": [ + 104, + 190, + 506, + 279 + ], + "type": "inline_equation", + "content": "49.4\\%" + }, + { + "bbox": [ + 104, + 190, + 506, + 279 + ], + "type": "text", + "content": "), where the latter depends on private dataset and multi-stage iteratively supervised fine-tuning and reinforcement learning." + } + ] + } + ], + "index": 3 + }, + { + "type": "table", + "bbox": [ + 105, + 327, + 507, + 514 + ], + "blocks": [ + { + "bbox": [ + 104, + 288, + 504, + 323 + ], + "lines": [ + { + "bbox": [ + 104, + 288, + 504, + 323 + ], + "spans": [ + { + "bbox": [ + 104, + 288, + 504, + 323 + ], + "type": "text", + "content": "Table 1: Accuracy on mathematical reasoning benchmarks. 
We report the pass@1 accuracy tested with greedy decoding. The results of ODPO are directly copied from [30]. Here " + }, + { + "bbox": [ + 104, + 288, + 504, + 323 + ], + "type": "inline_equation", + "content": "q, r, a" + }, + { + "bbox": [ + 104, + 288, + 504, + 323 + ], + "type": "text", + "content": " denote the dependency on questions, human-verified reasoning traces and golden answers respectively." + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 105, + 327, + 507, + 514 + ], + "lines": [ + { + "bbox": [ + 105, + 327, + 507, + 514 + ], + "spans": [ + { + "bbox": [ + 105, + 327, + 507, + 514 + ], + "type": "table", + "html": "
SupervisionMATHMinerva MathOlympiad BenchAIME24AMC23Avg.
frontier model
Llama-3.1-70B-Instruct{q,r,a}64.635.331.916.730.135.7
Eurus-2-7B-PRIME{q,r,a}79.238.642.126.757.848.9
1.5B model
Qwen2.5-MathNone52.210.725.210.042.528.1
Qwen2.5-Math-Instruct{q,r,a}73.830.938.76.752.540.5
Qwen2.5-Math w/SFT{q,r,a}61.826.127.13.337.531.2
Qwen2.5-Math w/GRPO{q,a}75.232.033.616.752.542.0
Qwen2.5-Math w/EMPO{q}73.032.436.613.355.042.1
7B model
Qwen2.5-MathNone64.815.126.76.740.030.7
Qwen2.5-Math Instruct{q,r,a}82.843.841.216.762.549.4
Qwen2.5-Math w/SFT{q,r,a}72.234.633.210.045.039.0
Qwen2.5-Math w/ODPO{q,a}76.830.937.926.762.547.0
Qwen2.5-Math w/GRPO{q,a}77.839.739.120.057.546.8
Qwen2.5-Math w/EMPO{q}78.040.437.320.065.048.1
", + "image_path": "fc80a9084de5033e815e2520f330fdf2683f45aaa0f8388c48366e9e8069a1d9.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "table_body" + } + ], + "index": 5 + }, + { + "bbox": [ + 105, + 531, + 362, + 544 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 531, + 362, + 544 + ], + "spans": [ + { + "bbox": [ + 105, + 531, + 362, + 544 + ], + "type": "text", + "content": "4.2.2 Performance on Natural Free-form Reasoning Tasks." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 104, + 550, + 506, + 628 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 550, + 506, + 628 + ], + "spans": [ + { + "bbox": [ + 104, + 550, + 506, + 628 + ], + "type": "text", + "content": "We present the results on free-form natural reasoning tasks in Table 2. On the MMLU-Pro benchmark, our EMP0 improves the accuracy from " + }, + { + "bbox": [ + 104, + 550, + 506, + 628 + ], + "type": "inline_equation", + "content": "32.1\\%" + }, + { + "bbox": [ + 104, + 550, + 506, + 628 + ], + "type": "text", + "content": " to " + }, + { + "bbox": [ + 104, + 550, + 506, + 628 + ], + "type": "inline_equation", + "content": "50.1\\%" + }, + { + "bbox": [ + 104, + 550, + 506, + 628 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 550, + 506, + 628 + ], + "type": "inline_equation", + "content": "32.7\\%" + }, + { + "bbox": [ + 104, + 550, + 506, + 628 + ], + "type": "text", + "content": " to " + }, + { + "bbox": [ + 104, + 550, + 506, + 628 + ], + "type": "inline_equation", + "content": "58.8\\%" + }, + { + "bbox": [ + 104, + 550, + 506, + 628 + ], + "type": "text", + "content": " on Qwen2.5-7B and 14B Base model respectively. 
Besides, on more challenging GPQA benchmark, EMP0 results in increasing accuracy from " + }, + { + "bbox": [ + 104, + 550, + 506, + 628 + ], + "type": "inline_equation", + "content": "15.9\\%" + }, + { + "bbox": [ + 104, + 550, + 506, + 628 + ], + "type": "text", + "content": " to " + }, + { + "bbox": [ + 104, + 550, + 506, + 628 + ], + "type": "inline_equation", + "content": "28.8\\%" + }, + { + "bbox": [ + 104, + 550, + 506, + 628 + ], + "type": "text", + "content": " on 7B model, " + }, + { + "bbox": [ + 104, + 550, + 506, + 628 + ], + "type": "inline_equation", + "content": "30.6\\%" + }, + { + "bbox": [ + 104, + 550, + 506, + 628 + ], + "type": "text", + "content": " to " + }, + { + "bbox": [ + 104, + 550, + 506, + 628 + ], + "type": "inline_equation", + "content": "35.3\\%" + }, + { + "bbox": [ + 104, + 550, + 506, + 628 + ], + "type": "text", + "content": " on 14B model. Notably, we observe that the SFT baseline fails to consistently improve model performance. We hypothesize that this is due to the noise in the reference responses within the Natural Reasoning training data (as mentioned by [32]). This phenomenon further underscores the practical potential of our proposed method." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 105, + 639, + 220, + 651 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 639, + 220, + 651 + ], + "spans": [ + { + "bbox": [ + 105, + 639, + 220, + 651 + ], + "type": "text", + "content": "4.2.3 Training Dynamics" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 104, + 658, + 504, + 703 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 658, + 504, + 703 + ], + "spans": [ + { + "bbox": [ + 104, + 658, + 504, + 703 + ], + "type": "text", + "content": "We further conduct experiments to investigate the reliability of our unsupervised reward signals. 
As shown in Figure 3, the unsupervised reward signals of EMP0 have a strongly negative correlation with the true rewards based on golden answers. Thus, by continuously minimizing the semantic entropy objective, the model can boost its accuracy in a fully unsupervised manner." + } + ] + } + ], + "index": 9 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 116, + 710, + 307, + 722 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 116, + 710, + 307, + 722 + ], + "spans": [ + { + "bbox": [ + 116, + 710, + 307, + 722 + ], + "type": "text", + "content": "4https://huggingface.co/TIGER-Lab/general-verifier" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 302, + 741, + 309, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 741, + 309, + 750 + ], + "spans": [ + { + "bbox": [ + 302, + 741, + 309, + 750 + ], + "type": "text", + "content": "7" + } + ] + } + ], + "index": 11 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 6 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 115, + 108, + 496, + 345 + ], + "blocks": [ + { + "bbox": [ + 104, + 69, + 504, + 103 + ], + "lines": [ + { + "bbox": [ + 104, + 69, + 504, + 103 + ], + "spans": [ + { + "bbox": [ + 104, + 69, + 504, + 103 + ], + "type": "text", + "content": "Table 2: Accuracy results on free-form natural reasoning benchmarks. We report pass@1 accuracy tested with greedy decoding. Here " + }, + { + "bbox": [ + 104, + 69, + 504, + 103 + ], + "type": "inline_equation", + "content": "\\{q,r,a\\}" + }, + { + "bbox": [ + 104, + 69, + 504, + 103 + ], + "type": "text", + "content": " denote the dependency on questions, human-verified reasoning traces and verifiable golden answers respectively." 
+ } + ] + } + ], + "index": 0, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 115, + 108, + 496, + 345 + ], + "lines": [ + { + "bbox": [ + 115, + 108, + 496, + 345 + ], + "spans": [ + { + "bbox": [ + 115, + 108, + 496, + 345 + ], + "type": "table", + "html": "
SupervisionMMLU ProGPQA
STEMHumanitiesSocialOtherAvg.
3B model
Qwen2.5-Base-8.325.357.424.156.8311.2
Qwen2.5-Base 5-shot{q,r,a}34.726.247.935.935.313.8
Qwen2.5-Instruct{q,r,a}44.830.756.047.144.528.2
Qwen2.5-Base w/SFT{q,r,a}19.810.428.018.419.111.5
Qwen2.5-Base w/GRPO{q,a}32.227.749.838.735.217.1
Qwen2.5-Base w/EMPO{q}31.726.248.136.734.120.6
7B model
Qwen2.5-Base-30.123.845.934.332.115.9
Qwen2.5-Base 5-shot{q,r,a}45.736.359.149.446.823.5
Qwen2.5-Instruct{q,r,a}56.938.164.158.655.235.3
Qwen2.5-Base w/SFT{q,r,a}32.67.115.830.125.622.4
Qwen2.5-Base w/GRPO{q,a}57.136.264.456.654.533.8
Qwen2.5-Base w/EMPO{q}52.434.659.050.950.128.8
14B model
Qwen2.5-Base-30.828.044.433.032.730.6
Qwen2.5-Base 5-shot{q,r,a}51.935.863.454.451.433.2
Qwen2.5-Instruct{q,r,a}63.647.173.866.762.942.9
Qwen2.5-Base w/SFT{q,r,a}37.027.840.238.036.128.5
Qwen2.5-Base w/GRPO{q,a}62.942.168.659.859.635.6
Qwen2.5-Base w/EMPO{q}61.441.668.360.058.835.3
", + "image_path": "4ef9f197d8e943cb755d6ac18d94d533d69a071ce7d5d85c4d3749ba340c60ae.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "table_body" + } + ], + "index": 1 + }, + { + "type": "image", + "bbox": [ + 108, + 355, + 235, + 462 + ], + "blocks": [ + { + "bbox": [ + 108, + 355, + 235, + 462 + ], + "lines": [ + { + "bbox": [ + 108, + 355, + 235, + 462 + ], + "spans": [ + { + "bbox": [ + 108, + 355, + 235, + 462 + ], + "type": "image", + "image_path": "54ea7ba11ac679ed9b70c45139b15bb0c494d20f9cfe7b9ba7e519498e1d1d00.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 104, + 472, + 504, + 538 + ], + "lines": [ + { + "bbox": [ + 104, + 472, + 504, + 538 + ], + "spans": [ + { + "bbox": [ + 104, + 472, + 504, + 538 + ], + "type": "text", + "content": "Figure 3: We visualize the training dynamics when tuning Qwen2.5-Math-7B Base model with EMP0 on 20K prompts randomly sampled from NuminaMath-CoT. The left illustrates the running average of semantic entropy (Eq. 4). The middle shows the trend of our unsupervised reward as defined by Eq. 6. The right shows the model accuracy on training data at each RL steps. Along the unsupervised RL-based training trajectory, EMP0 establishes a stable learning process with consistently decreased semantic entropy and improved accuracy." 
+ } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_caption" + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 236, + 355, + 367, + 462 + ], + "blocks": [ + { + "bbox": [ + 236, + 355, + 367, + 462 + ], + "lines": [ + { + "bbox": [ + 236, + 355, + 367, + 462 + ], + "spans": [ + { + "bbox": [ + 236, + 355, + 367, + 462 + ], + "type": "image", + "image_path": "075188f39a5ebdacf50005cb09d7e38cd4328467bddde5289c5e00402e56d320.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + } + ], + "index": 3 + }, + { + "type": "image", + "bbox": [ + 369, + 355, + 500, + 462 + ], + "blocks": [ + { + "bbox": [ + 369, + 355, + 500, + 462 + ], + "lines": [ + { + "bbox": [ + 369, + 355, + 500, + 462 + ], + "spans": [ + { + "bbox": [ + 369, + 355, + 500, + 462 + ], + "type": "image", + "image_path": "f93c2ee71c0adf195cf26c5902acef4260f08f79adba738f3dcae5c452ddfc31.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_body" + } + ], + "index": 4 + }, + { + "bbox": [ + 104, + 556, + 504, + 586 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 556, + 504, + 586 + ], + "spans": [ + { + "bbox": [ + 104, + 556, + 504, + 586 + ], + "type": "text", + "content": "5 Discussion and Conclusion: The Role of Unsupervised Learning in Eliciting Pre-Trained Reasoning Capabilities" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 104, + 596, + 504, + 662 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 596, + 504, + 662 + ], + "spans": [ + { + "bbox": [ + 104, + 596, + 504, + 662 + ], + "type": "text", + "content": "The strong empirical performance of EMP0, particularly its ability as a fully unsupervised method to match or even slightly outperform supervised counterparts like GRPO (as observed with the 7B model), prompts a deeper examination of how such reasoning incentivization mechanisms work. 
This is especially pertinent given the counterintuitive observation that these substantial improvements on benchmarks are achieved without a consistent increase in response length or clear evidence of an \"Aha moment\" – a hypothesized sudden emergence of enhanced reasoning capabilities." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 104, + 666, + 506, + 723 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 666, + 506, + 723 + ], + "spans": [ + { + "bbox": [ + 104, + 666, + 506, + 723 + ], + "type": "text", + "content": "To dissect the nature of the improvements conferred by reinforcement learning (RL) post-training, we investigated its influence on pass@k accuracy. This metric is crucial as recent studies [41, 42] suggest that RL may not fundamentally expand the inherent reasoning capacities of LLMs beyond those already embedded in their pre-trained base. As depicted in Figure 4, our findings align with this perspective. Both GRPO and EMP0 significantly enhance pass@k scores for small to moderate" + } + ] + } + ], + "index": 8 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 302, + 741, + 308, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 741, + 308, + 750 + ], + "spans": [ + { + "bbox": [ + 302, + 741, + 308, + 750 + ], + "type": "text", + "content": "8" + } + ] + } + ], + "index": 9 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 7 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 109, + 71, + 304, + 208 + ], + "blocks": [ + { + "bbox": [ + 109, + 71, + 304, + 208 + ], + "lines": [ + { + "bbox": [ + 109, + 71, + 304, + 208 + ], + "spans": [ + { + "bbox": [ + 109, + 71, + 304, + 208 + ], + "type": "image", + "image_path": "335efb518b469c6cc68204d57510d455982bcad4c3c5a141a0b1137dc465080b.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 104, + 221, + 504, + 288 + ], + "lines": [ + { + "bbox": [ + 104, + 221, + 504, + 
288 + ], + "spans": [ + { + "bbox": [ + 104, + 221, + 504, + 288 + ], + "type": "text", + "content": "Figure 4: Pass@k curves of Qwen2.5-Math-7B Base model and its counterparts trained with GRPO and our EMP0 on Minerva Math and OMNI reasoning benchmarks. Pass@k measures the probability that at least 1 of the top " + }, + { + "bbox": [ + 104, + 221, + 504, + 288 + ], + "type": "inline_equation", + "content": "k" + }, + { + "bbox": [ + 104, + 221, + 504, + 288 + ], + "type": "text", + "content": " generated solutions is correct. Pass@1 is equivalent to accuracy, as it checks if the single solution is correct. When " + }, + { + "bbox": [ + 104, + 221, + 504, + 288 + ], + "type": "inline_equation", + "content": "k" + }, + { + "bbox": [ + 104, + 221, + 504, + 288 + ], + "type": "text", + "content": " is small, RL-trained models outperform the original base model. However, as " + }, + { + "bbox": [ + 104, + 221, + 504, + 288 + ], + "type": "inline_equation", + "content": "k" + }, + { + "bbox": [ + 104, + 221, + 504, + 288 + ], + "type": "text", + "content": " increases (e.g., into the tens or hundreds), the performance of the base models often converges with, or even exceeds, that of the RL-trained models." 
+ } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "type": "image", + "bbox": [ + 304, + 71, + 495, + 207 + ], + "blocks": [ + { + "bbox": [ + 304, + 71, + 495, + 207 + ], + "lines": [ + { + "bbox": [ + 304, + 71, + 495, + 207 + ], + "spans": [ + { + "bbox": [ + 304, + 71, + 495, + 207 + ], + "type": "image", + "image_path": "9954ebe5c2ce338a3ab700264b280f56e5763e1ff349dfed0fa4106fd524e1d7.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + } + ], + "index": 1 + }, + { + "bbox": [ + 104, + 318, + 504, + 362 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 318, + 504, + 362 + ], + "spans": [ + { + "bbox": [ + 104, + 318, + 504, + 362 + ], + "type": "text", + "content": "values of k (e.g., " + }, + { + "bbox": [ + 104, + 318, + 504, + 362 + ], + "type": "inline_equation", + "content": "k = 16" + }, + { + "bbox": [ + 104, + 318, + 504, + 362 + ], + "type": "text", + "content": " or 32) compared to the base model. This demonstrates an improved efficiency in surfacing correct reasoning paths with fewer attempts. However, as k becomes substantially large, the performance of these RL-trained models tends to converge with, and is sometimes surpassed by, that of the base model." 
+ } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 104, + 366, + 506, + 455 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 366, + 506, + 455 + ], + "spans": [ + { + "bbox": [ + 104, + 366, + 506, + 455 + ], + "type": "text", + "content": "This convergence at high " + }, + { + "bbox": [ + 104, + 366, + 506, + 455 + ], + "type": "inline_equation", + "content": "k" + }, + { + "bbox": [ + 104, + 366, + 506, + 455 + ], + "type": "text", + "content": " values, coupled with our qualitative observations that the base models themselves already exhibit sophisticated reasoning behaviors such as pausing, self-correction, and backtracking (see Appendix for examples), strongly indicates that the foundational reasoning pathways are largely pre-existing. Consequently, RL post-training, whether supervised or unsupervised like EMP0, appears to primarily refine the model's ability to efficiently access, prioritize, and consistently select these latent reasoning patterns, rather than instilling fundamentally novel ones. The observed improvements in pass@1 (accuracy) are thus likely a consequence of this enhanced sampling efficiency." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 104, + 460, + 504, + 559 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 460, + 504, + 559 + ], + "spans": [ + { + "bbox": [ + 104, + 460, + 504, + 559 + ], + "type": "text", + "content": "These empirical insights from the pass@k analysis lend considerable support to the emerging consensus that pre-training shoulders the primary burden of endowing LLMs with their core abilities. We align our interpretation with prior insights from [43]: \"Pretraining does all the hard work. 
One big bet is that the pretraining phase grants all the abilities to the base LM, and finetuning is simply like a style transfer which positions the model to the right output space.\" Under this conjecture (or more precisely, an emerging, but not yet unanimously accepted consensus [41]), we attribute the efficacy of our method to the robust pretraining process of the Qwen2.5 Base model: If a base model possesses strong inherent reasoning capabilities, the subsequent challenge is not necessarily to teach it new reasoning skills from scratch, but rather to effectively elicit and guide these existing skills." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 104, + 563, + 504, + 629 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 563, + 504, + 629 + ], + "spans": [ + { + "bbox": [ + 104, + 563, + 504, + 629 + ], + "type": "text", + "content": "EMPO's success highlights that intrinsic reward signals, derived purely from the model's objective to minimize semantic entropy and thus achieve greater consistency in its outputs, can be surprisingly potent for this elicitation process. In a well-pre-trained model, outputs that are semantically consistent are more likely to align with correct and coherent reasoning. EMPO leverages this by incentivizing the model to favor such consistent outputs, effectively guiding it to refine its selection from its collection of existing reasoning strategies without requiring external validation of correctness." 
+ } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 104, + 634, + 506, + 722 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 634, + 506, + 722 + ], + "spans": [ + { + "bbox": [ + 104, + 634, + 506, + 722 + ], + "type": "text", + "content": "In conclusion, while RL techniques, including EMP0, may not be forging entirely new fundamental reasoning capabilities beyond what pre-training provides, their role in significantly enhancing the sampling efficiency and reliability of accessing these pre-trained abilities is of paramount practical importance. Optimizing models for such efficiency is crucial for real-world applications. EMP0, by achieving this through a fully unsupervised framework, offers a particularly scalable, cost-effective, and practical approach to unlocking and refining the vast reasoning potential embedded within pre-trained LLMs, especially in domains where curated supervisory data is scarce or prohibitively expensive to obtain." + } + ] + } + ], + "index": 7 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 302, + 741, + 309, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 741, + 309, + 750 + ], + "spans": [ + { + "bbox": [ + 302, + 741, + 309, + 750 + ], + "type": "text", + "content": "9" + } + ] + } + ], + "index": 8 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 8 + }, + { + "para_blocks": [ + { + "bbox": [ + 106, + 71, + 164, + 83 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 71, + 164, + 83 + ], + "spans": [ + { + "bbox": [ + 106, + 71, + 164, + 83 + ], + "type": "text", + "content": "References" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 106, + 91, + 506, + 723 + ], + "type": "list", + "angle": 0, + "index": 20, + "blocks": [ + { + "bbox": [ + 111, + 91, + 505, + 125 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 91, + 505, + 125 + ], + "spans": [ + { + "bbox": [ + 111, + 91, + 505, + 125 + ], + 
"type": "text", + "content": "[1] Xinyu Guan, Li Lyna Zhang, Yifei Liu, Ning Shang, Youran Sun, Yi Zhu, Fan Yang, and Mao Yang. rstar-math: Small llms can master math reasoning with self-evolved deep thinking. arXiv preprint arXiv:2501.04519, 2025." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 111, + 127, + 506, + 161 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 127, + 506, + 161 + ], + "spans": [ + { + "bbox": [ + 111, + 127, + 506, + 161 + ], + "type": "text", + "content": "[2] Ganqu Cui, Lifan Yuan, Zefan Wang, Hanbin Wang, Wendi Li, Bingxiang He, Yuchen Fan, Tianyu Yu, Qixin Xu, Weize Chen, et al. Process reinforcement through implicit rewards. arXiv preprint arXiv:2502.01456, 2025." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 111, + 163, + 506, + 196 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 163, + 506, + 196 + ], + "spans": [ + { + "bbox": [ + 111, + 163, + 506, + 196 + ], + "type": "text", + "content": "[3] Lifan Yuan, Wendi Li, Huayu Chen, Ganqu Cui, Ning Ding, Kaiyan Zhang, Bowen Zhou, Zhiyuan Liu, and Hao Peng. Free process rewards without process labels. arXiv preprint arXiv:2412.01981, 2024." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 111, + 200, + 504, + 223 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 200, + 504, + 223 + ], + "spans": [ + { + "bbox": [ + 111, + 200, + 504, + 223 + ], + "type": "text", + "content": "[4] Dejian Yang Daya Guo, Qihao Zhu. Deepseek-coder: When the large language model meets programming – the rise of code intelligence, 2024." 
+ } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 111, + 225, + 504, + 258 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 225, + 504, + 258 + ], + "spans": [ + { + "bbox": [ + 111, + 225, + 504, + 258 + ], + "type": "text", + "content": "[5] An Yang, Beichen Zhang, Binyuan Hui, Bofei Gao, Bowen Yu, Chengpeng Li, Dayiheng Liu, Jianhong Tu, Jingren Zhou, Junyang Lin, et al. Qwen2. 5-math technical report: Toward mathematical expert model via self-improvement. arXiv preprint arXiv:2409.12122, 2024." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 111, + 261, + 504, + 285 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 261, + 504, + 285 + ], + "spans": [ + { + "bbox": [ + 111, + 261, + 504, + 285 + ], + "type": "text", + "content": "[6] Jiaxin Huang, Shixiang Shane Gu, Le Hou, Yuexin Wu, Xuezhi Wang, Hongkun Yu, and Jiawei Han. Large language models can self-improve. arXiv preprint arXiv:2210.11610, 2022." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 111, + 286, + 506, + 319 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 286, + 506, + 319 + ], + "spans": [ + { + "bbox": [ + 111, + 286, + 506, + 319 + ], + "type": "text", + "content": "[7] Ilia Shumailov, Zakhar Shumaylov, Yiren Zhao, Nicolas Papernot, Ross Anderson, and Yarin Gal. Ai models collapse when trained on recursively generated data. Nature, 631(8022):755-759, 2024." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 111, + 323, + 506, + 356 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 323, + 506, + 356 + ], + "spans": [ + { + "bbox": [ + 111, + 323, + 506, + 356 + ], + "type": "text", + "content": "[8] Fangkai Jiao, Geyang Guo, Xingxing Zhang, Nancy F Chen, Shafiq Joty, and Furu Wei. Preference optimization for reasoning with pseudo feedback. arXiv preprint arXiv:2411.16345, 2024." 
+ } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 111, + 358, + 506, + 382 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 358, + 506, + 382 + ], + "spans": [ + { + "bbox": [ + 111, + 358, + 506, + 382 + ], + "type": "text", + "content": "[9] Wei Xiong, Hanning Zhang, Chenlu Ye, Lichang Chen, Nan Jiang, and Tong Zhang. Selfrewarding correction for mathematical reasoning. arXiv preprint arXiv:2502.19613, 2025." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 107, + 384, + 504, + 407 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 384, + 504, + 407 + ], + "spans": [ + { + "bbox": [ + 107, + 384, + 504, + 407 + ], + "type": "text", + "content": "[10] John Schulman, Filip Wolski, Prafulla Dhariwal, Alec Radford, and Oleg Klimov. Proximal policy optimization algorithms. arXiv preprint arXiv:1707.06347, 2017." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 106, + 410, + 506, + 444 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 410, + 506, + 444 + ], + "spans": [ + { + "bbox": [ + 106, + 410, + 506, + 444 + ], + "type": "text", + "content": "[11] Zhihong Shao, Peiyi Wang, Qihao Zhu, Runxin Xu, Junxiao Song, Xiao Bi, Haowei Zhang, Mingchuan Zhang, YK Li, Y Wu, et al. Deepseekmath: Pushing the limits of mathematical reasoning in open language models. arXiv preprint arXiv:2402.03300, 2024." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 107, + 446, + 506, + 480 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 446, + 506, + 480 + ], + "spans": [ + { + "bbox": [ + 107, + 446, + 506, + 480 + ], + "type": "text", + "content": "[12] Daya Guo, Dejian Yang, Haowei Zhang, Junxiao Song, Ruoyu Zhang, Runxin Xu, Qihao Zhu, Shirong Ma, Peiyi Wang, Xiao Bi, et al. Deepseek-r1: Incentivizing reasoning capability in llms via reinforcement learning. arXiv preprint arXiv:2501.12948, 2025." 
+ } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 107, + 482, + 504, + 516 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 482, + 504, + 516 + ], + "spans": [ + { + "bbox": [ + 107, + 482, + 504, + 516 + ], + "type": "text", + "content": "[13] Ajay Patel, Markus Hofmarcher, Claudiu Leoveanu-Condrei, Marius-Constantin Dinu, Chris Callison-Burch, and Sepp Hochreiter. Large language models can self-improve at web agent tasks. arXiv preprint arXiv:2405.20309, 2024." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 107, + 518, + 506, + 551 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 518, + 506, + 551 + ], + "spans": [ + { + "bbox": [ + 107, + 518, + 506, + 551 + ], + "type": "text", + "content": "[14] Siheng Li, Cheng Yang, Zesen Cheng, Lemao Liu, Mo Yu, Yujiu Yang, and Wai Lam. Large language models can self-improve in long-context reasoning. arXiv preprint arXiv:2411.08147, 2024." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 107, + 554, + 506, + 588 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 554, + 506, + 588 + ], + "spans": [ + { + "bbox": [ + 107, + 554, + 506, + 588 + ], + "type": "text", + "content": "[15] Yuxin Zuo, Kaiyan Zhang, Shang Qu, Li Sheng, Xuekai Zhu, Biqing Qi, Youbang Sun, Ganqu Cui, Ning Ding, and Bowen Zhou. Trl: Test-time reinforcement learning. arXiv preprint arXiv:2504.16084, 2025." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 107, + 590, + 504, + 613 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 590, + 504, + 613 + ], + "spans": [ + { + "bbox": [ + 107, + 590, + 504, + 613 + ], + "type": "text", + "content": "[16] Weizhe Yuan, Richard Yuanzhe Pang, Kyunghyun Cho, Xian Li, Sainbayar Sukhbaatar, Jing Xu, and Jason Weston. Self-rewarding language models, 2024." 
+ } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 107, + 616, + 506, + 649 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 616, + 506, + 649 + ], + "spans": [ + { + "bbox": [ + 107, + 616, + 506, + 649 + ], + "type": "text", + "content": "[17] Muning Wen, Cheng Deng, Jun Wang, Weinan Zhang, and Ying Wen. Entropy-regularized token-level policy optimization for large language models. arXiv e-prints, pages arXiv-2402, 2024." + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 107, + 652, + 506, + 686 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 652, + 506, + 686 + ], + "spans": [ + { + "bbox": [ + 107, + 652, + 506, + 686 + ], + "type": "text", + "content": "[18] Zhiheng Xi, Wenxiang Chen, Boyang Hong, Senjie Jin, Rui Zheng, Wei He, Yiwen Ding, Shichun Liu, Xin Guo, Junzhe Wang, et al. Training large language models for reasoning through reverse curriculum reinforcement learning. arXiv preprint arXiv:2402.05808, 2024." + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 107, + 689, + 504, + 723 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 689, + 504, + 723 + ], + "spans": [ + { + "bbox": [ + 107, + 689, + 504, + 723 + ], + "type": "text", + "content": "[19] Zhaoyang Wang, Weilei He, Zhiyuan Liang, Xuchao Zhang, Chetan Bansal, Ying Wei, Weitong Zhang, and Huaxiu Yao. CREAM: Consistency regularized self-rewarding language models. In The Thirteenth International Conference on Learning Representations, 2025." 
+ } + ] + } + ], + "index": 19 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 300, + 741, + 311, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 741, + 311, + 750 + ], + "spans": [ + { + "bbox": [ + 300, + 741, + 311, + 750 + ], + "type": "text", + "content": "10" + } + ] + } + ], + "index": 21 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 9 + }, + { + "para_blocks": [ + { + "bbox": [ + 106, + 72, + 506, + 723 + ], + "type": "list", + "angle": 0, + "index": 19, + "blocks": [ + { + "bbox": [ + 106, + 72, + 506, + 95 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 72, + 506, + 95 + ], + "spans": [ + { + "bbox": [ + 106, + 72, + 506, + 95 + ], + "type": "text", + "content": "[20] Yves Grandvalet and Yoshua Bengio. Semi-supervised learning by entropy minimization. Advances in neural information processing systems, 17, 2004." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 106, + 98, + 505, + 121 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 98, + 505, + 121 + ], + "spans": [ + { + "bbox": [ + 106, + 98, + 505, + 121 + ], + "type": "text", + "content": "[21] Dequan Wang, Evan Shelhamer, Shaoteng Liu, Bruno Olshausen, and Trevor Darrell. Tent: Fully test-time adaptation by entropy minimization. arXiv preprint arXiv:2006.10726, 2020." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 107, + 124, + 504, + 147 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 124, + 504, + 147 + ], + "spans": [ + { + "bbox": [ + 107, + 124, + 504, + 147 + ], + "type": "text", + "content": "[22] Qingyang Zhang, Yatao Bian, Xinke Kong, Peilin Zhao, and Changqing Zhang. Come: Test-time adaption by conservatively minimizing entropy. arXiv preprint arXiv:2410.10894, 2024." 
+ } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 106, + 149, + 506, + 182 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 149, + 506, + 182 + ], + "spans": [ + { + "bbox": [ + 106, + 149, + 506, + 182 + ], + "type": "text", + "content": "[23] Lorenz Kuhn, Yarin Gal, and Sebastian Farquhar. Semantic uncertainty: Linguistic invariances for uncertainty estimation in natural language generation. arXiv preprint arXiv:2302.09664, 2023." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 107, + 186, + 504, + 209 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 186, + 504, + 209 + ], + "spans": [ + { + "bbox": [ + 107, + 186, + 504, + 209 + ], + "type": "text", + "content": "[24] Sebastian Farquhar, Jannik Kossen, Lorenz Kuhn, and Yarin Gal. Detecting hallucinations in large language models using semantic entropy. Nature, 630(8017):625-630, 2024." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 107, + 212, + 504, + 245 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 212, + 504, + 245 + ], + "spans": [ + { + "bbox": [ + 107, + 212, + 504, + 245 + ], + "type": "text", + "content": "[25] Eugene Kharitonov, Rahma Chaabouni, Diane Bouchacourt, and Marco Baroni. Entropy minimization in emergent languages. In International Conference on Machine Learning, pages 5220-5230. PMLR, 2020." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 107, + 248, + 504, + 272 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 248, + 504, + 272 + ], + "spans": [ + { + "bbox": [ + 107, + 248, + 504, + 272 + ], + "type": "text", + "content": "[26] Ori Press, Ravid Shwartz-Ziv, Yann LeCun, and Matthias Bethge. The entropy enigma: Success and failure of entropy minimization. arXiv preprint arXiv:2405.05012, 2024." 
+ } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 107, + 274, + 506, + 319 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 274, + 506, + 319 + ], + "spans": [ + { + "bbox": [ + 107, + 274, + 506, + 319 + ], + "type": "text", + "content": "[27] Soren Mindermann, Jan M Brauner, Muhammed T Razzak, Mrinank Sharma, Andreas Kirsch, Winnie Xu, Benedikt Holgen, Aidan N Gomez, Adrien Morisot, Sebastian Farquhar, et al. Prioritized training on points that are learnable, worth learning, and not yet learnt. In International Conference on Machine Learning, pages 15630-15649. PMLR, 2022." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 107, + 322, + 506, + 356 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 322, + 506, + 356 + ], + "spans": [ + { + "bbox": [ + 107, + 322, + 506, + 356 + ], + "type": "text", + "content": "[28] Qiying Yu, Zheng Zhang, Ruofei Zhu, Yufeng Yuan, Xiaochen Zuo, Yu Yue, Tiantian Fan, Gaohong Liu, Lingjun Liu, Xin Liu, et al. Dapo: An open-source llm reinforcement learning system at scale. arXiv preprint arXiv:2503.14476, 2025." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 107, + 358, + 458, + 371 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 358, + 458, + 371 + ], + "spans": [ + { + "bbox": [ + 107, + 358, + 458, + 371 + ], + "type": "text", + "content": "[29] Hugging Face. Open r1: A fully open reproduction of deepseek-r1, January 2025." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 107, + 373, + 504, + 396 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 373, + 504, + 396 + ], + "spans": [ + { + "bbox": [ + 107, + 373, + 504, + 396 + ], + "type": "text", + "content": "[30] Hanning Zhang, Jiarui Yao, Chenlu Ye, Wei Xiong, and Tong Zhang. Online-dpo-r1: Unlocking effective reasoning without the ppo overhead, 2025. Notion Blog." 
+ } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 107, + 399, + 506, + 464 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 399, + 506, + 464 + ], + "spans": [ + { + "bbox": [ + 107, + 399, + 506, + 464 + ], + "type": "text", + "content": "[31] Jia LI, Edward Beeching, Lewis Tunstall, Ben Lipkin, Roman Soletskyi, Shengyi Costa Huang, Kashif Rasul, Longhui Yu, Albert Jiang, Ziju Shen, Zihan Qin, Bin Dong, Li Zhou, Yann Fleureau, Guillaume Lample, and Stanislas Polu. Numinamath. [https://huggingface.co/AI-MO/NuminaMath-CoT](https://github.com/project-numina/aimo-progress-prize/blob/main/report/numina_dataset.pdf), 2024." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 107, + 468, + 506, + 502 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 468, + 506, + 502 + ], + "spans": [ + { + "bbox": [ + 107, + 468, + 506, + 502 + ], + "type": "text", + "content": "[32] Weizhe Yuan, Jane Yu, Song Jiang, Karthik Padthe, Yang Li, Dong Wang, Ilia Kulikov, Kyunghyun Cho, Yuandong Tian, Jason E Weston, et al. Naturalreasoning: Reasoning in the wild with " + }, + { + "bbox": [ + 107, + 468, + 506, + 502 + ], + "type": "inline_equation", + "content": "2.8\\mathrm{m}" + }, + { + "bbox": [ + 107, + 468, + 506, + 502 + ], + "type": "text", + "content": " challenging questions. arXiv preprint arXiv:2502.13124, 2025." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 107, + 505, + 504, + 539 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 505, + 504, + 539 + ], + "spans": [ + { + "bbox": [ + 107, + 505, + 504, + 539 + ], + "type": "text", + "content": "[33] Xingyu Chen, Jiahao Xu, Tian Liang, Zhiwei He, Jianhui Pang, Dian Yu, Linfeng Song, Qiuzhi Liu, Mengfei Zhou, Zhuosheng Zhang, et al. Do not think that much for " + }, + { + "bbox": [ + 107, + 505, + 504, + 539 + ], + "type": "inline_equation", + "content": "2 + 3 = ?" 
+ }, + { + "bbox": [ + 107, + 505, + 504, + 539 + ], + "type": "text", + "content": " on the overthinking of o1-like llms. arXiv preprint arXiv:2412.21187, 2024." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 107, + 542, + 504, + 564 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 542, + 504, + 564 + ], + "spans": [ + { + "bbox": [ + 107, + 542, + 504, + 564 + ], + "type": "text", + "content": "[34] Huan Ma, Jingdong Chen, Guangyu Wang, and Changqing Zhang. Estimating llm uncertainty with logits. arXiv preprint arXiv:2502.00290, 2025." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 107, + 567, + 504, + 602 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 567, + 504, + 602 + ], + "spans": [ + { + "bbox": [ + 107, + 567, + 504, + 602 + ], + "type": "text", + "content": "[35] Weihao Zeng, Yuzhen Huang, Wei Liu, Keqing He, Qian Liu, Zejun Ma, and Junxian He. 7b model and 8k examples: Emerging reasoning with reinforcement learning is both effective and efficient. https://hkust-nlp.notion.site/simplerl-reason, 2025. Notion Blog." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 107, + 605, + 506, + 649 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 605, + 506, + 649 + ], + "spans": [ + { + "bbox": [ + 107, + 605, + 506, + 649 + ], + "type": "text", + "content": "[36] Yubo Wang, Xueguang Ma, Ge Zhang, Yuansheng Ni, Abhranil Chandra, Shiguang Guo, Weiming Ren, Aaran Arulraj, Xuan He, Ziyan Jiang, et al. Mmlu-pro: A more robust and challenging multi-task language understanding benchmark. In The Thirty-eight Conference on Neural Information Processing Systems Datasets and Benchmarks Track, 2024." 
+ } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 107, + 652, + 504, + 685 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 652, + 504, + 685 + ], + "spans": [ + { + "bbox": [ + 107, + 652, + 504, + 685 + ], + "type": "text", + "content": "[37] David Rein, Betty Li Hou, Asa Cooper Stickland, Jackson Petty, Richard Yuanzhe Pang, Julien Dirani, Julian Michael, and Samuel R Bowman. Gpqa: A graduate-level google-proof q&a benchmark. In First Conference on Language Modeling, 2024." + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 107, + 689, + 504, + 723 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 689, + 504, + 723 + ], + "spans": [ + { + "bbox": [ + 107, + 689, + 504, + 723 + ], + "type": "text", + "content": "[38] Nathan Lambert, Jacob Morrison, Valentina Pyatkin, Shengyi Huang, Hamish Ivison, Faeze Brahman, Lester James V. Miranda, Alisa Liu, Nouha Dziri, Shane Lyu, Yuling Gu, Saumya Malik, Victoria Graf, Jena D. Hwang, Jiangjiang Yang, Ronan Le Bras, Oyvind Tafjord, Chris" + } + ] + } + ], + "index": 18 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 300, + 741, + 310, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 741, + 310, + 750 + ], + "spans": [ + { + "bbox": [ + 300, + 741, + 310, + 750 + ], + "type": "text", + "content": "11" + } + ] + } + ], + "index": 20 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 10 + }, + { + "para_blocks": [ + { + "bbox": [ + 106, + 72, + 506, + 294 + ], + "type": "list", + "angle": 0, + "index": 7, + "blocks": [ + { + "bbox": [ + 127, + 72, + 505, + 95 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 127, + 72, + 505, + 95 + ], + "spans": [ + { + "bbox": [ + 127, + 72, + 505, + 95 + ], + "type": "text", + "content": "Wilhelm, Luca Soldaini, Noah A. Smith, Yizhong Wang, Pradeep Dasigi, and Hannaneh Hajishirzi. 
Tulu 3: Pushing frontiers in open language model post-training. 2024." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 107, + 98, + 506, + 132 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 98, + 506, + 132 + ], + "spans": [ + { + "bbox": [ + 107, + 98, + 506, + 132 + ], + "type": "text", + "content": "[39] Leandro von Werra, Younes Belkada, Lewis Tunstall, Edward Beeching, Tristan Thrush, Nathan Lambert, Shengyi Huang, Kashif Rasul, and Quentin Gallouédec. Trl: Transformer reinforcement learning. https://github.com/huggingface/trl, 2020." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 107, + 135, + 505, + 168 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 135, + 505, + 168 + ], + "spans": [ + { + "bbox": [ + 107, + 135, + 505, + 168 + ], + "type": "text", + "content": "[40] Jingcheng Hu, Yinmin Zhang, Qi Han, Daxin Jiang, and Heung-Yeung Shum Xiangyu Zhang. Open-reasoner-zero: An open source approach to scaling reinforcement learning on the base model. https://github.com/Open-Reasoner-Zero/Open-Reasoner-Zero, 2025." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 106, + 171, + 504, + 205 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 171, + 504, + 205 + ], + "spans": [ + { + "bbox": [ + 106, + 171, + 504, + 205 + ], + "type": "text", + "content": "[41] Yang Yue, Zhiqi Chen, Rui Lu, Andrew Zhao, Zhaokai Wang, Shiji Song, and Gao Huang. Does reinforcement learning really incentivize reasoning capacity in llms beyond the base model? arXiv preprint arXiv:2504.13837, 2025." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 106, + 208, + 506, + 241 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 208, + 506, + 241 + ], + "spans": [ + { + "bbox": [ + 106, + 208, + 506, + 241 + ], + "type": "text", + "content": "[42] Yuda Song, Hanlin Zhang, Carson Eisenach, Sham Kakade, Dean Foster, and Udaya Ghai. 
Mind the gap: Examining the self-improvement capabilities of large language models. arXiv preprint arXiv:2412.02674, 2024." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 106, + 245, + 504, + 278 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 245, + 504, + 278 + ], + "spans": [ + { + "bbox": [ + 106, + 245, + 504, + 278 + ], + "type": "text", + "content": "[43] Zhengxuan Wu, Aryaman Arora, Zheng Wang, Atticus Geiger, Dan Jurafsky, Christopher D Manning, and Christopher Potts. Reft: Representation finetuning for language models. Advances in Neural Information Processing Systems, 37:63908-63962, 2024." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 106, + 281, + 425, + 294 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 281, + 425, + 294 + ], + "spans": [ + { + "bbox": [ + 106, + 281, + 425, + 294 + ], + "type": "text", + "content": "[44] George Casella and Roger Berger. Statistical inference. CRC press, 2024." + } + ] + } + ], + "index": 6 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 300, + 741, + 311, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 741, + 311, + 750 + ], + "spans": [ + { + "bbox": [ + 300, + 741, + 311, + 750 + ], + "type": "text", + "content": "12" + } + ] + } + ], + "index": 8 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 11 + }, + { + "para_blocks": [ + { + "bbox": [ + 105, + 71, + 168, + 86 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 71, + 168, + 86 + ], + "spans": [ + { + "bbox": [ + 105, + 71, + 168, + 86 + ], + "type": "text", + "content": "Appendices" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 106, + 99, + 505, + 323 + ], + "type": "list", + "angle": 0, + "index": 10, + "blocks": [ + { + "bbox": [ + 106, + 99, + 505, + 111 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 99, + 505, + 111 + ], + "spans": [ + { 
+ "bbox": [ + 106, + 99, + 505, + 111 + ], + "type": "text", + "content": "A Prompt Templates 13" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 106, + 125, + 504, + 137 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 125, + 504, + 137 + ], + "spans": [ + { + "bbox": [ + 106, + 125, + 504, + 137 + ], + "type": "text", + "content": "B Case Study 15" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 106, + 152, + 504, + 164 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 152, + 504, + 164 + ], + "spans": [ + { + "bbox": [ + 106, + 152, + 504, + 164 + ], + "type": "text", + "content": "C Implementation Details about Semantic Clustering 16" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 106, + 178, + 504, + 190 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 178, + 504, + 190 + ], + "spans": [ + { + "bbox": [ + 106, + 178, + 504, + 190 + ], + "type": "text", + "content": "D Additional Results on Llama3 Model Series 16" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 106, + 205, + 504, + 217 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 205, + 504, + 217 + ], + "spans": [ + { + "bbox": [ + 106, + 205, + 504, + 217 + ], + "type": "text", + "content": "E Additional Training Details 18" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 106, + 232, + 504, + 243 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 232, + 504, + 243 + ], + "spans": [ + { + "bbox": [ + 106, + 232, + 504, + 243 + ], + "type": "text", + "content": "F Computational Cost of Semantic Clustering 18" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 106, + 258, + 504, + 270 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 258, + 504, + 270 + ], + "spans": [ + { + "bbox": [ + 106, + 258, + 504, + 270 + ], + "type": "text", + "content": "G Details of Prompt Collection 19" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ 
+ 106, + 285, + 504, + 296 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 285, + 504, + 296 + ], + "spans": [ + { + "bbox": [ + 106, + 285, + 504, + 296 + ], + "type": "text", + "content": "H Additional Result about Pass@k 19" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 106, + 311, + 504, + 323 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 311, + 504, + 323 + ], + "spans": [ + { + "bbox": [ + 106, + 311, + 504, + 323 + ], + "type": "text", + "content": "I The Influence of Clustering Quality on the Performance of EMPO 19" + } + ] + } + ], + "index": 9 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 105, + 338, + 223, + 353 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 338, + 223, + 353 + ], + "spans": [ + { + "bbox": [ + 105, + 338, + 223, + 353 + ], + "type": "text", + "content": "A Prompt Templates" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 105, + 363, + 371, + 376 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 363, + 371, + 376 + ], + "spans": [ + { + "bbox": [ + 105, + 363, + 371, + 376 + ], + "type": "text", + "content": "We provide the prompt templates used for training and evaluation." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 104, + 380, + 506, + 447 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 380, + 506, + 447 + ], + "spans": [ + { + "bbox": [ + 104, + 380, + 506, + 447 + ], + "type": "text", + "content": "For mathematical reasoning tasks, we adopt the following reasoning prompt template similar to Online-DPO-R1 project [30] for both training and testing. During testing, we found that by adding system prompt, the accuracy of Qwen2.5-Math Base model can be better on mathematical benchmarks. However, system prompt would not help in natural reasoning tasks. Thus we use the same test prompt (start with system prompt) for both Base model and finetuned models in mathematical tasks. 
In natural reasoning tasks, we do not add system prompt for Base models." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 115, + 454, + 373, + 466 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 115, + 454, + 373, + 466 + ], + "spans": [ + { + "bbox": [ + 115, + 454, + 373, + 466 + ], + "type": "text", + "content": "Mathematical Reasoning Training and Evaluation Template" + } + ] + } + ], + "index": 14 + }, + { + "type": "code", + "bbox": [ + 114, + 474, + 436, + 550 + ], + "blocks": [ + { + "bbox": [ + 114, + 474, + 436, + 550 + ], + "lines": [ + { + "bbox": [ + 114, + 474, + 436, + 550 + ], + "spans": [ + { + "bbox": [ + 114, + 474, + 436, + 550 + ], + "type": "text", + "content": "system \nPlease reason step by step, and output your final answer within \\boxed{}}. \n \nuser \n{Question} Let's think step by step and output the final answer within \\boxed{}}. \n \nassistant" + } + ] + } + ], + "index": 15, + "angle": 0, + "type": "code_body" + } + ], + "index": 15, + "sub_type": "code", + "guess_lang": "txt" + }, + { + "bbox": [ + 105, + 561, + 504, + 585 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 561, + 504, + 585 + ], + "spans": [ + { + "bbox": [ + 105, + 561, + 504, + 585 + ], + "type": "text", + "content": "To train models with our EMPO for free-form natural reasoning tasks, we adopt the following reasoning prompt template similar to that we used in mathematical tasks for training." 
+ } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 115, + 592, + 325, + 604 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 115, + 592, + 325, + 604 + ], + "spans": [ + { + "bbox": [ + 115, + 592, + 325, + 604 + ], + "type": "text", + "content": "Free-form Natural Reasoning Training Template" + } + ] + } + ], + "index": 17 + }, + { + "type": "code", + "bbox": [ + 114, + 612, + 422, + 689 + ], + "blocks": [ + { + "bbox": [ + 114, + 612, + 422, + 689 + ], + "lines": [ + { + "bbox": [ + 114, + 612, + 422, + 689 + ], + "spans": [ + { + "bbox": [ + 114, + 612, + 422, + 689 + ], + "type": "text", + "content": "system \nReason step by step, and output your final answer within \\boxed{}?. \n \nuser \n{Question} Reason step by step and output the final answer within \\boxed{}?. \n \nassistant" + } + ] + } + ], + "index": 18, + "angle": 0, + "type": "code_body" + } + ], + "index": 18, + "sub_type": "code", + "guess_lang": "txt" + }, + { + "bbox": [ + 105, + 699, + 504, + 723 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 699, + 504, + 723 + ], + "spans": [ + { + "bbox": [ + 105, + 699, + 504, + 723 + ], + "type": "text", + "content": "Since the MMLU-Pro and GPQA are both close-formed multi-choice benchmark. To evaluate the natural reasoning capability of the models, we use the following prompt template during testing." 
+ } + ] + } + ], + "index": 19 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 300, + 741, + 310, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 741, + 310, + 750 + ], + "spans": [ + { + "bbox": [ + 300, + 741, + 310, + 750 + ], + "type": "text", + "content": "13" + } + ] + } + ], + "index": 20 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 12 + }, + { + "para_blocks": [ + { + "bbox": [ + 116, + 73, + 301, + 85 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 116, + 73, + 301, + 85 + ], + "spans": [ + { + "bbox": [ + 116, + 73, + 301, + 85 + ], + "type": "text", + "content": "MMLU Pro Test Template for Base Models" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 115, + 92, + 495, + 114 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 115, + 92, + 495, + 114 + ], + "spans": [ + { + "bbox": [ + 115, + 92, + 495, + 114 + ], + "type": "text", + "content": "Question: {Question} Reason step by step and output the final answer (the correct letter choice from A-P) within \\boxed{}}." 
+ } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 115, + 115, + 151, + 125 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 115, + 115, + 151, + 125 + ], + "spans": [ + { + "bbox": [ + 115, + 115, + 151, + 125 + ], + "type": "text", + "content": "Answer:" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 115, + 165, + 272, + 177 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 115, + 165, + 272, + 177 + ], + "spans": [ + { + "bbox": [ + 115, + 165, + 272, + 177 + ], + "type": "text", + "content": "Few Shot MMLU Pro Test Template" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 115, + 184, + 495, + 207 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 115, + 184, + 495, + 207 + ], + "spans": [ + { + "bbox": [ + 115, + 184, + 495, + 207 + ], + "type": "text", + "content": "Question: {Question in Demonstration 1} Reason step by step and output the final answer (the correct letter choice from A-P) within \\boxed{}" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 115, + 207, + 495, + 228 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 115, + 207, + 495, + 228 + ], + "spans": [ + { + "bbox": [ + 115, + 207, + 495, + 228 + ], + "type": "text", + "content": "Answer: Let's reason step by step. CoT of Demonstration 1 Therefore, the correct answer is Answer of Demonstration 1." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 115, + 236, + 290, + 251 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 115, + 236, + 290, + 251 + ], + "spans": [ + { + "bbox": [ + 115, + 236, + 290, + 251 + ], + "type": "text", + "content": "(Omit more demonstrations for readability)" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 115, + 257, + 495, + 285 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 115, + 257, + 495, + 285 + ], + "spans": [ + { + "bbox": [ + 115, + 257, + 495, + 285 + ], + "type": "text", + "content": "... 
Question: {Question} Reason step by step and output the final answer (the correct letter choice from A-P) within \\boxed{}}." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 115, + 285, + 151, + 293 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 115, + 285, + 151, + 293 + ], + "spans": [ + { + "bbox": [ + 115, + 285, + 151, + 293 + ], + "type": "text", + "content": "Answer:" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 115, + 334, + 386, + 346 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 115, + 334, + 386, + 346 + ], + "spans": [ + { + "bbox": [ + 115, + 334, + 386, + 346 + ], + "type": "text", + "content": "MMLU Pro Test Template for Finetuned Models (SFT and RL)" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 115, + 354, + 495, + 387 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 115, + 354, + 495, + 387 + ], + "spans": [ + { + "bbox": [ + 115, + 354, + 495, + 387 + ], + "type": "text", + "content": "system \nReason step by step, and output your final answer (the correct letter choice from A-P) within \n\\boxed{}" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 115, + 387, + 180, + 397 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 115, + 387, + 180, + 397 + ], + "spans": [ + { + "bbox": [ + 115, + 387, + 180, + 397 + ], + "type": "text", + "content": "" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 115, + 398, + 181, + 407 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 115, + 398, + 181, + 407 + ], + "spans": [ + { + "bbox": [ + 115, + 398, + 181, + 407 + ], + "type": "text", + "content": "user" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 115, + 408, + 495, + 430 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 115, + 408, + 495, + 430 + ], + "spans": [ + { + "bbox": [ + 115, + 408, + 495, + 430 + ], + "type": "text", + "content": "{Question} Reason step by step and output the final answer (the 
correct letter choice from A-P) within \\boxed{}" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 115, + 430, + 161, + 441 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 115, + 430, + 161, + 441 + ], + "spans": [ + { + "bbox": [ + 115, + 430, + 161, + 441 + ], + "type": "text", + "content": "" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 115, + 441, + 198, + 452 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 115, + 441, + 198, + 452 + ], + "spans": [ + { + "bbox": [ + 115, + 441, + 198, + 452 + ], + "type": "text", + "content": "assistant" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 115, + 493, + 272, + 504 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 115, + 493, + 272, + 504 + ], + "spans": [ + { + "bbox": [ + 115, + 493, + 272, + 504 + ], + "type": "text", + "content": "GPQA Test Prompt for Base Models" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 115, + 512, + 495, + 534 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 115, + 512, + 495, + 534 + ], + "spans": [ + { + "bbox": [ + 115, + 512, + 495, + 534 + ], + "type": "text", + "content": "Question: {Question} Reason step by step and output the final answer (A, B, C, or D) within \\boxed{}" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 115, + 535, + 151, + 544 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 115, + 535, + 151, + 544 + ], + "spans": [ + { + "bbox": [ + 115, + 535, + 151, + 544 + ], + "type": "text", + "content": "Answer:" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 115, + 585, + 314, + 597 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 115, + 585, + 314, + 597 + ], + "spans": [ + { + "bbox": [ + 115, + 585, + 314, + 597 + ], + "type": "text", + "content": "Few Shot GPQA Test Prompt for Base Models" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 115, + 604, + 495, + 627 + ], + "type": "text", + "angle": 0, + 
"lines": [ + { + "bbox": [ + 115, + 604, + 495, + 627 + ], + "spans": [ + { + "bbox": [ + 115, + 604, + 495, + 627 + ], + "type": "text", + "content": "Question: {Question in Demonstration 1} Reason step by step and output the final answer (A, B, C, or D) within \\boxed{}" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 115, + 627, + 495, + 648 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 115, + 627, + 495, + 648 + ], + "spans": [ + { + "bbox": [ + 115, + 627, + 495, + 648 + ], + "type": "text", + "content": "Answer: Let's reason step by step. CoT in Demonstration 1 Therefore, the correct final answer is Answer in Demonstration 1." + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 115, + 654, + 286, + 671 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 115, + 654, + 286, + 671 + ], + "spans": [ + { + "bbox": [ + 115, + 654, + 286, + 671 + ], + "type": "text", + "content": "(Omit more demonstration for readability)" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 115, + 677, + 495, + 703 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 115, + 677, + 495, + 703 + ], + "spans": [ + { + "bbox": [ + 115, + 677, + 495, + 703 + ], + "type": "text", + "content": "... 
Question: {Question} Reason step by step and output the final answer (A, B, C, or D) within \\boxed{}" + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 115, + 704, + 151, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 115, + 704, + 151, + 713 + ], + "spans": [ + { + "bbox": [ + 115, + 704, + 151, + 713 + ], + "type": "text", + "content": "Answer:" + } + ] + } + ], + "index": 24 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 300, + 741, + 311, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 741, + 311, + 750 + ], + "spans": [ + { + "bbox": [ + 300, + 741, + 311, + 750 + ], + "type": "text", + "content": "14" + } + ] + } + ], + "index": 25 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 13 + }, + { + "para_blocks": [ + { + "bbox": [ + 115, + 73, + 358, + 85 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 115, + 73, + 358, + 85 + ], + "spans": [ + { + "bbox": [ + 115, + 73, + 358, + 85 + ], + "type": "text", + "content": "GPQA Test Prompt for Finetuned Models (SFT and RL)" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 114, + 93, + 192, + 104 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 93, + 192, + 104 + ], + "spans": [ + { + "bbox": [ + 114, + 93, + 192, + 104 + ], + "type": "text", + "content": "system" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 114, + 104, + 447, + 115 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 104, + 447, + 115 + ], + "spans": [ + { + "bbox": [ + 114, + 104, + 447, + 115 + ], + "type": "text", + "content": "Reason step by step, and output your final answer (A, B, C, or D) within \\boxed{}" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 115, + 115, + 161, + 125 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 115, + 115, + 161, + 125 + ], + "spans": [ + { + "bbox": [ + 115, + 115, + 161, + 125 + ], + "type": "text", + "content": "" + } + ] + 
} + ], + "index": 3 + }, + { + "bbox": [ + 115, + 127, + 181, + 137 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 115, + 127, + 181, + 137 + ], + "spans": [ + { + "bbox": [ + 115, + 127, + 181, + 137 + ], + "type": "text", + "content": "user" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 115, + 137, + 485, + 148 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 115, + 137, + 485, + 148 + ], + "spans": [ + { + "bbox": [ + 115, + 137, + 485, + 148 + ], + "type": "text", + "content": "{Question} Reason step by step and output the final answer (A, B, C, or D) within \\boxed{}" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 115, + 148, + 162, + 158 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 115, + 148, + 162, + 158 + ], + "spans": [ + { + "bbox": [ + 115, + 148, + 162, + 158 + ], + "type": "text", + "content": "" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 115, + 159, + 198, + 169 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 115, + 159, + 198, + 169 + ], + "spans": [ + { + "bbox": [ + 115, + 159, + 198, + 169 + ], + "type": "text", + "content": "assistant" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 105, + 194, + 186, + 208 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 194, + 186, + 208 + ], + "spans": [ + { + "bbox": [ + 105, + 194, + 186, + 208 + ], + "type": "text", + "content": "B Case Study" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 115, + 223, + 378, + 235 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 115, + 223, + 378, + 235 + ], + "spans": [ + { + "bbox": [ + 115, + 223, + 378, + 235 + ], + "type": "text", + "content": "Qwen2.5-Math-7B's Answer to the 18-th Question in AMC23" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 114, + 242, + 495, + 320 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 242, + 495, + 320 + ], + "spans": [ + { + "bbox": [ + 
114, + 242, + 495, + 320 + ], + "type": "text", + "content": "Question: Janet rolls a standard 6-sided die 4 times and keeps a running total of the numbers she rolls. What is the probability that at some point, her running total will equal 3? The final answer can be written in the form " + }, + { + "bbox": [ + 114, + 242, + 495, + 320 + ], + "type": "inline_equation", + "content": "\\frac{m}{n}" + }, + { + "bbox": [ + 114, + 242, + 495, + 320 + ], + "type": "text", + "content": ", where " + }, + { + "bbox": [ + 114, + 242, + 495, + 320 + ], + "type": "inline_equation", + "content": "m" + }, + { + "bbox": [ + 114, + 242, + 495, + 320 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 114, + 242, + 495, + 320 + ], + "type": "inline_equation", + "content": "n" + }, + { + "bbox": [ + 114, + 242, + 495, + 320 + ], + "type": "text", + "content": " are relatively prime positive integers. What is " + }, + { + "bbox": [ + 114, + 242, + 495, + 320 + ], + "type": "inline_equation", + "content": "m + n" + }, + { + "bbox": [ + 114, + 242, + 495, + 320 + ], + "type": "text", + "content": "? Response: To solve this problem, we need to determine the probability that at some point during 4 rolls of a standard 6-sided die, the running total will equal 3. We can use dynamic programming to keep track of the probabilities of each possible running total after each roll. Here's the plan:" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 115, + 320, + 494, + 396 + ], + "type": "list", + "angle": 0, + "index": 16, + "blocks": [ + { + "bbox": [ + 115, + 320, + 490, + 331 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 115, + 320, + 490, + 331 + ], + "spans": [ + { + "bbox": [ + 115, + 320, + 490, + 331 + ], + "type": "text", + "content": "1. Initialize a list to keep track of the probability of each possible running total after each roll." 
+ } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 115, + 331, + 483, + 341 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 115, + 331, + 483, + 341 + ], + "spans": [ + { + "bbox": [ + 115, + 331, + 483, + 341 + ], + "type": "text", + "content": "2. Start with the initial condition that the probability of a running total of 0 after 0 rolls is 1." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 115, + 342, + 493, + 363 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 115, + 342, + 493, + 363 + ], + "spans": [ + { + "bbox": [ + 115, + 342, + 493, + 363 + ], + "type": "text", + "content": "3. For each roll, update the probabilities of each possible running total by considering the outcomes of rolling a 1, 2, 3, 4, 5, or 6." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 115, + 363, + 422, + 374 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 115, + 363, + 422, + 374 + ], + "spans": [ + { + "bbox": [ + 115, + 363, + 422, + 374 + ], + "type": "text", + "content": "4. After each roll, check if the probability of a running total of 3 is non-zero." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 115, + 374, + 494, + 396 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 115, + 374, + 494, + 396 + ], + "spans": [ + { + "bbox": [ + 115, + 374, + 494, + 396 + ], + "type": "text", + "content": "5. The final answer will be the probability of a running total of 3 after 4 rolls. 
Let's implement this in Python:" + } + ] + } + ], + "index": 15 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 115, + 402, + 125, + 407 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 115, + 402, + 125, + 407 + ], + "spans": [ + { + "bbox": [ + 115, + 402, + 125, + 407 + ], + "type": "text", + "content": "···" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 115, + 407, + 285, + 418 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 115, + 407, + 285, + 418 + ], + "spans": [ + { + "bbox": [ + 115, + 407, + 285, + 418 + ], + "type": "text", + "content": "(We omit the python code for readability.)" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 115, + 423, + 125, + 428 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 115, + 423, + 125, + 428 + ], + "spans": [ + { + "bbox": [ + 115, + 423, + 125, + 428 + ], + "type": "text", + "content": "···" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 115, + 429, + 494, + 461 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 115, + 429, + 494, + 461 + ], + "spans": [ + { + "bbox": [ + 115, + 429, + 494, + 461 + ], + "type": "text", + "content": "It looks like there was an issue with the implementation. The probability of the running total being exactly 3 after 4 rolls should not be zero, so let's correct the approach and re-implement the solution. Here's the corrected plan:" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 115, + 462, + 494, + 527 + ], + "type": "list", + "angle": 0, + "index": 26, + "blocks": [ + { + "bbox": [ + 115, + 462, + 490, + 472 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 115, + 462, + 490, + 472 + ], + "spans": [ + { + "bbox": [ + 115, + 462, + 490, + 472 + ], + "type": "text", + "content": "1. Initialize a list to keep track of the probability of each possible running total after each roll." 
+ } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 115, + 472, + 482, + 483 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 115, + 472, + 482, + 483 + ], + "spans": [ + { + "bbox": [ + 115, + 472, + 482, + 483 + ], + "type": "text", + "content": "2. Start with the initial condition that the probability of a running total of 0 after 0 rolls is 1." + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 115, + 483, + 494, + 505 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 115, + 483, + 494, + 505 + ], + "spans": [ + { + "bbox": [ + 115, + 483, + 494, + 505 + ], + "type": "text", + "content": "3. For each roll, update the probabilities of each possible running total by considering the outcomes of rolling a 1, 2, 3, 4, 5, or 6." + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 115, + 506, + 422, + 516 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 115, + 506, + 422, + 516 + ], + "spans": [ + { + "bbox": [ + 115, + 506, + 422, + 516 + ], + "type": "text", + "content": "4. After each roll, check if the probability of a running total of 3 is non-zero." + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 115, + 516, + 425, + 527 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 115, + 516, + 425, + 527 + ], + "spans": [ + { + "bbox": [ + 115, + 516, + 425, + 527 + ], + "type": "text", + "content": "5. The final answer will be the probability of a running total of 3 after 4 rolls." 
+ } + ] + } + ], + "index": 25 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 115, + 528, + 254, + 538 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 115, + 528, + 254, + 538 + ], + "spans": [ + { + "bbox": [ + 115, + 528, + 254, + 538 + ], + "type": "text", + "content": "Let's re-implement this in Python:" + } + ] + } + ], + "index": 27 + }, + { + "bbox": [ + 115, + 545, + 125, + 550 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 115, + 545, + 125, + 550 + ], + "spans": [ + { + "bbox": [ + 115, + 545, + 125, + 550 + ], + "type": "text", + "content": "···" + } + ] + } + ], + "index": 28 + }, + { + "bbox": [ + 115, + 549, + 285, + 560 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 115, + 549, + 285, + 560 + ], + "spans": [ + { + "bbox": [ + 115, + 549, + 285, + 560 + ], + "type": "text", + "content": "(We omit the python code for readability.)" + } + ] + } + ], + "index": 29 + }, + { + "bbox": [ + 115, + 569, + 494, + 592 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 115, + 569, + 494, + 592 + ], + "spans": [ + { + "bbox": [ + 115, + 569, + 494, + 592 + ], + "type": "text", + "content": "It seems there is still an issue with the implementation. Let's manually check the logic and correct it step by step." + } + ] + } + ], + "index": 30 + }, + { + "bbox": [ + 115, + 593, + 494, + 670 + ], + "type": "list", + "angle": 0, + "index": 35, + "blocks": [ + { + "bbox": [ + 115, + 593, + 494, + 613 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 115, + 593, + 494, + 613 + ], + "spans": [ + { + "bbox": [ + 115, + 593, + 494, + 613 + ], + "type": "text", + "content": "1. Initialize the probability list with the initial condition that the probability of a running total of 0 after 0 rolls is 1." 
+ } + ] + } + ], + "index": 31 + }, + { + "bbox": [ + 115, + 614, + 494, + 635 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 115, + 614, + 494, + 635 + ], + "spans": [ + { + "bbox": [ + 115, + 614, + 494, + 635 + ], + "type": "text", + "content": "2. For each roll, update the probabilities of each possible running total by considering the outcomes of rolling a 1, 2, 3, 4, 5, or 6." + } + ] + } + ], + "index": 32 + }, + { + "bbox": [ + 115, + 636, + 422, + 647 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 115, + 636, + 422, + 647 + ], + "spans": [ + { + "bbox": [ + 115, + 636, + 422, + 647 + ], + "type": "text", + "content": "3. After each roll, check if the probability of a running total of 3 is non-zero." + } + ] + } + ], + "index": 33 + }, + { + "bbox": [ + 115, + 647, + 494, + 670 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 115, + 647, + 494, + 670 + ], + "spans": [ + { + "bbox": [ + 115, + 647, + 494, + 670 + ], + "type": "text", + "content": "4. The final answer will be the probability of a running total of 3 after 4 rolls. 
Let's manually simulate the process: - After 0 rolls: Probability of 0 is 1" + } + ] + } + ], + "index": 34 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 115, + 675, + 124, + 679 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 115, + 675, + 124, + 679 + ], + "spans": [ + { + "bbox": [ + 115, + 675, + 124, + 679 + ], + "type": "text", + "content": "···" + } + ] + } + ], + "index": 36 + }, + { + "bbox": [ + 115, + 680, + 494, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 115, + 680, + 494, + 715 + ], + "spans": [ + { + "bbox": [ + 115, + 680, + 494, + 715 + ], + "type": "text", + "content": "The probability of a running total of 3 after 4 rolls is " + }, + { + "bbox": [ + 115, + 680, + 494, + 715 + ], + "type": "inline_equation", + "content": "4 / 1296" + }, + { + "bbox": [ + 115, + 680, + 494, + 715 + ], + "type": "text", + "content": ", which simplifies to " + }, + { + "bbox": [ + 115, + 680, + 494, + 715 + ], + "type": "inline_equation", + "content": "1 / 324" + }, + { + "bbox": [ + 115, + 680, + 494, + 715 + ], + "type": "text", + "content": ". Therefore, the final answer in the form of " + }, + { + "bbox": [ + 115, + 680, + 494, + 715 + ], + "type": "inline_equation", + "content": "m / n" + }, + { + "bbox": [ + 115, + 680, + 494, + 715 + ], + "type": "text", + "content": " is " + }, + { + "bbox": [ + 115, + 680, + 494, + 715 + ], + "type": "inline_equation", + "content": "1 / 324" + }, + { + "bbox": [ + 115, + 680, + 494, + 715 + ], + "type": "text", + "content": ", and the sum " + }, + { + "bbox": [ + 115, + 680, + 494, + 715 + ], + "type": "inline_equation", + "content": "m + n" + }, + { + "bbox": [ + 115, + 680, + 494, + 715 + ], + "type": "text", + "content": " is " + }, + { + "bbox": [ + 115, + 680, + 494, + 715 + ], + "type": "inline_equation", + "content": "1 + 324 = 325" + }, + { + "bbox": [ + 115, + 680, + 494, + 715 + ], + "type": "text", + "content": ". 
The final answer is " + }, + { + "bbox": [ + 115, + 680, + 494, + 715 + ], + "type": "inline_equation", + "content": "\\boxed{325}" + }, + { + "bbox": [ + 115, + 680, + 494, + 715 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 37 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 300, + 741, + 310, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 741, + 310, + 750 + ], + "spans": [ + { + "bbox": [ + 300, + 741, + 310, + 750 + ], + "type": "text", + "content": "15" + } + ] + } + ], + "index": 38 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 14 + }, + { + "para_blocks": [ + { + "bbox": [ + 105, + 71, + 389, + 86 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 71, + 389, + 86 + ], + "spans": [ + { + "bbox": [ + 105, + 71, + 389, + 86 + ], + "type": "text", + "content": "C Implementation Details about Semantic Clustering" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 104, + 94, + 506, + 140 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 94, + 506, + 140 + ], + "spans": [ + { + "bbox": [ + 104, + 94, + 506, + 140 + ], + "type": "text", + "content": "We detail the implementation of semantic clustering in our EMP0. The semantic cluster process is shown as Algorithm 1. We also provide the simplified python code to show how we determine whether two model outputs are equivalent or not in mathematical reasoning (Algorithm 2) and free-form natural reasoning tasks (Algorithm 3)." 
+ } + ] + } + ], + "index": 1 + }, + { + "type": "code", + "bbox": [ + 97, + 166, + 433, + 345 + ], + "blocks": [ + { + "bbox": [ + 106, + 152, + 247, + 164 + ], + "lines": [ + { + "bbox": [ + 106, + 152, + 247, + 164 + ], + "spans": [ + { + "bbox": [ + 106, + 152, + 247, + 164 + ], + "type": "text", + "content": "Algorithm 1: Semantic Clustering" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "code_caption" + }, + { + "bbox": [ + 97, + 166, + 433, + 345 + ], + "lines": [ + { + "bbox": [ + 97, + 166, + 433, + 345 + ], + "spans": [ + { + "bbox": [ + 97, + 166, + 433, + 345 + ], + "type": "text", + "content": "Require : question " + }, + { + "bbox": [ + 97, + 166, + 433, + 345 + ], + "type": "inline_equation", + "content": "q" + }, + { + "bbox": [ + 97, + 166, + 433, + 345 + ], + "type": "text", + "content": " , a group set of model response " + }, + { + "bbox": [ + 97, + 166, + 433, + 345 + ], + "type": "inline_equation", + "content": "\\{o_2,\\dots,o_G\\}" + }, + { + "bbox": [ + 97, + 166, + 433, + 345 + ], + "type": "text", + "content": " , verifier " + }, + { + "bbox": [ + 97, + 166, + 433, + 345 + ], + "type": "inline_equation", + "content": "\\nu" + }, + { + "bbox": [ + 97, + 166, + 433, + 345 + ], + "type": "text", + "content": " Initialize: " + }, + { + "bbox": [ + 97, + 166, + 433, + 345 + ], + "type": "inline_equation", + "content": "C = \\{o_1\\}" + }, + { + "bbox": [ + 97, + 166, + 433, + 345 + ], + "type": "text", + "content": " \nfor " + }, + { + "bbox": [ + 97, + 166, + 433, + 345 + ], + "type": "inline_equation", + "content": "2\\leq i\\leq G" + }, + { + "bbox": [ + 97, + 166, + 433, + 345 + ], + "type": "text", + "content": " do \nfor " + }, + { + "bbox": [ + 97, + 166, + 433, + 345 + ], + "type": "inline_equation", + "content": "c\\in C" + }, + { + "bbox": [ + 97, + 166, + 433, + 345 + ], + "type": "text", + "content": " do // Random choose one element from " + }, + { + "bbox": [ + 97, + 166, + 433, + 345 + ], + "type": "inline_equation", + 
"content": "c" + }, + { + "bbox": [ + 97, + 166, + 433, + 345 + ], + "type": "text", + "content": " for comparison " + }, + { + "bbox": [ + 97, + 166, + 433, + 345 + ], + "type": "inline_equation", + "content": "o_c = c[0]" + }, + { + "bbox": [ + 97, + 166, + 433, + 345 + ], + "type": "text", + "content": " // Is the meaning of old sequence equivalent to new one? if " + }, + { + "bbox": [ + 97, + 166, + 433, + 345 + ], + "type": "inline_equation", + "content": "\\mathcal{V}(q,o_c,o_i) ==" + }, + { + "bbox": [ + 97, + 166, + 433, + 345 + ], + "type": "text", + "content": " True then // Put into existing class " + }, + { + "bbox": [ + 97, + 166, + 433, + 345 + ], + "type": "inline_equation", + "content": "c = c\\cup \\{o_i\\}" + }, + { + "bbox": [ + 97, + 166, + 433, + 345 + ], + "type": "text", + "content": " break \nend \nend \n// " + }, + { + "bbox": [ + 97, + 166, + 433, + 345 + ], + "type": "inline_equation", + "content": "o_i" + }, + { + "bbox": [ + 97, + 166, + 433, + 345 + ], + "type": "text", + "content": " is semantically distinct, belongs to a novel cluster. \n" + }, + { + "bbox": [ + 97, + 166, + 433, + 345 + ], + "type": "inline_equation", + "content": "C\\gets C\\cup \\{o_i\\}" + }, + { + "bbox": [ + 97, + 166, + 433, + 345 + ], + "type": "text", + "content": " \nend \nReturn :C" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "code_body" + } + ], + "index": 3, + "sub_type": "code", + "guess_lang": "verilog" + }, + { + "type": "code", + "bbox": [ + 113, + 383, + 436, + 446 + ], + "blocks": [ + { + "bbox": [ + 105, + 362, + 406, + 374 + ], + "lines": [ + { + "bbox": [ + 105, + 362, + 406, + 374 + ], + "spans": [ + { + "bbox": [ + 105, + 362, + 406, + 374 + ], + "type": "text", + "content": "Algorithm 2: Implementation of verifier for mathematical reasoning tasks." 
+ } + ] + } + ], + "index": 4, + "angle": 0, + "type": "code_caption" + }, + { + "bbox": [ + 113, + 383, + 436, + 446 + ], + "lines": [ + { + "bbox": [ + 113, + 383, + 436, + 446 + ], + "spans": [ + { + "bbox": [ + 113, + 383, + 436, + 446 + ], + "type": "text", + "content": "from math_VERIFY import parse, verify \ndef are_equivalent (model_output_1, model_output_2) prediction_1 " + }, + { + "bbox": [ + 113, + 383, + 436, + 446 + ], + "type": "inline_equation", + "content": "=" + }, + { + "bbox": [ + 113, + 383, + 436, + 446 + ], + "type": "text", + "content": " parse(model_output_1) prediction_2 " + }, + { + "bbox": [ + 113, + 383, + 436, + 446 + ], + "type": "inline_equation", + "content": "=" + }, + { + "bbox": [ + 113, + 383, + 436, + 446 + ], + "type": "text", + "content": " parse(model_output_2) return verify(prediction_1,prediction_2)" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "code_body" + } + ], + "index": 5, + "sub_type": "code", + "guess_lang": "python" + }, + { + "bbox": [ + 105, + 468, + 351, + 481 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 468, + 351, + 481 + ], + "spans": [ + { + "bbox": [ + 105, + 468, + 351, + 481 + ], + "type": "text", + "content": "D Additional Results on Llama3 Model Series" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 104, + 492, + 506, + 571 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 492, + 506, + 571 + ], + "spans": [ + { + "bbox": [ + 104, + 492, + 506, + 571 + ], + "type": "text", + "content": "We conduct additional experiments to validate the efficacy of our EMP0 on other model series beyond Qwen2.5. The results are shown in Table 3. Consistent with other concurrent practice, we are unable to implement R1-Zero-like training on the Llama series, i.e., directly initializing RL process from the Base model without SFT). 
Thus, we instead consider a semi-supervised learning approach by initializing from instruct-tuned model and enhance the reasoning capability with our EMP0. As shown in Table 3, when initialize from Llama3.2-3B-Instruct model, our EMP0 can also substantially improve reasoning capability of instruct-tuned model which have undergone carefully-designed post-training." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 104, + 574, + 504, + 586 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 574, + 504, + 586 + ], + "spans": [ + { + "bbox": [ + 104, + 574, + 504, + 586 + ], + "type": "text", + "content": "Why Qwen2.5 Base model can initialize fully unsupervised RL training, while Llama3 can not?" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 104, + 590, + 506, + 723 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 590, + 506, + 723 + ], + "spans": [ + { + "bbox": [ + 104, + 590, + 506, + 723 + ], + "type": "text", + "content": "Consistent with open-source community practices, we found that R1-Zero-like RL training can only be reproduced unsupervised on Qwen2.5 series Base models. In contrast, Llama3 series model still necessitate \"cold-start\", i.e., SFT, before RL. Specifically, in our experiments, the Qwen2.5 Base models demonstrated inherent answer consistency from the initial stages of EMPO training. However, Llama3 series Base models suffer severe inconsistency and fail to convergence during training. We hypothesize this divergence stems from Qwen2.5's pretraining strategy. As mentioned in the technical report [5], the pretrain data corpus are mixed with both web text and QA pairs generated by instruct-tuned Qwen2 models. This endows Qwen2.5 Base models with native instruction-following capabilities. Experimental evidence supports this hypothesis. 
As shown in Table 2, Qwen2.5 Base models successfully follow the instruction such as \"put the final answer (A-P) within box\" when answering multiple-choice questions from MMLU Pro and achieve an accuracy notably higher than random guess." + } + ] + } + ], + "index": 9 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 300, + 741, + 312, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 741, + 312, + 750 + ], + "spans": [ + { + "bbox": [ + 300, + 741, + 312, + 750 + ], + "type": "text", + "content": "16" + } + ] + } + ], + "index": 10 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 15 + }, + { + "para_blocks": [ + { + "type": "code", + "bbox": [ + 111, + 164, + 527, + 639 + ], + "blocks": [ + { + "bbox": [ + 105, + 146, + 381, + 158 + ], + "lines": [ + { + "bbox": [ + 105, + 146, + 381, + 158 + ], + "spans": [ + { + "bbox": [ + 105, + 146, + 381, + 158 + ], + "type": "text", + "content": "Algorithm 3: Implementation of verifier for natural reasoning tasks." 
+ } + ] + } + ], + "index": 0, + "angle": 0, + "type": "code_caption" + }, + { + "bbox": [ + 111, + 164, + 527, + 639 + ], + "lines": [ + { + "bbox": [ + 111, + 164, + 527, + 639 + ], + "spans": [ + { + "bbox": [ + 111, + 164, + 527, + 639 + ], + "type": "text", + "content": "{\n verifier = AutoModelForCausalLM.from_pretrained(...);\n tokenizer = AutoTokenizer.from_pretrained(...);\n}\ndef are_equivalent(model_output_1, model_output_2, question, verifier)\n prediction_1 = parse(model_output_1)\n prediction_2 = parse(model_output_2)\n prompt = (\n f\"User: ## Question: {question}\\n\\n\"\n f\"## Ground Truth Answer: {prediction_1}\\n\\n\"\n f\"## Student Answer: {prediction_2}\\n\\n\"\n \"For the above question, please verify if the student's answer is equivalent to the ground truth answer.\\n\"\n \"Do not solve the question by yourself; just check if the student's answer is equivalent to the ground truth answer.\\n\"\n \"If correct, output Final Decision: Yes\".\n \"If incorrect, output Final Decision: No\\..\\n\"\n \"Assistant: Final Decision: \"\n )\n inputs = selftokenizer(modified_prompt, return_tensors=\"pt\").to(self.model_device)\n input_ids = inputs-input_ids\n # inference for output logits\n with torch.inference_mode():\n outputs = self.model.forward(input_ids)\n logits = outputs.logits\n # get next output logits\n next_token_logits = logits[0, input_ids.shape[1] - 1, :]\n # get the token ID of \"Yes\" and \"No\"\n decision_tokens = selftokenizer(\"Yes\", \"No\")\n yes_id = decision_tokens.input_ids[0]\n no_id = decision_tokens.input_ids[1]\n # calculate probability\n probs = torch softmax(next_token_logits, dim=0)\n yes_prob = probs[yes_id].item()\n no_prob = probs[no_id].item()\n return yes_prob > no_prob" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "code_body" + } + ], + "index": 1, + "sub_type": "code", + "guess_lang": "python" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 300, + 741, + 311, + 750 + ], + "type": "page_number", + 
"angle": 0, + "lines": [ + { + "bbox": [ + 300, + 741, + 311, + 750 + ], + "spans": [ + { + "bbox": [ + 300, + 741, + 311, + 750 + ], + "type": "text", + "content": "17" + } + ] + } + ], + "index": 2 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 16 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 106, + 86, + 507, + 224 + ], + "blocks": [ + { + "bbox": [ + 185, + 69, + 424, + 82 + ], + "lines": [ + { + "bbox": [ + 185, + 69, + 424, + 82 + ], + "spans": [ + { + "bbox": [ + 185, + 69, + 424, + 82 + ], + "type": "text", + "content": "Table 3: Accuracy on mathematical reasoning benchmarks." + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 106, + 86, + 507, + 224 + ], + "lines": [ + { + "bbox": [ + 106, + 86, + 507, + 224 + ], + "spans": [ + { + "bbox": [ + 106, + 86, + 507, + 224 + ], + "type": "table", + "html": "
SupervisionMATHMinerva MathOMNIAIME24AMC23Avg.
frontier model
Llama-3.1-70B-Instruct{q,r,a}64.635.331.916.730.135.7
Eurus-2-7B-PRIME{q,r,a}79.238.642.126.757.848.9
1B model
Llama3.2-InstructNone27.25.15.60.010.09.6
Llama3.2-Instruct w/GRPO{q,a}29.83.76.40.012.510.5
Llama3.2-Instruct w/EMPO{q}31.05.17.93.37.511.0
3B model
Llama3.2-InstructNone46.219.115.33.320.020.8
Llama3.2-Instruct w/GRPO{q,a}49.222.417.613.332.527.0
Llama3.2-Instruct w/EMPO{q}49.820.218.413.330.026.3
", + "image_path": "60a4a919becdc853bd38aaa5ce700b90b4c33d2b4e994f1e89f907e7218a2031.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "table_body" + } + ], + "index": 1 + }, + { + "bbox": [ + 105, + 248, + 269, + 262 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 248, + 269, + 262 + ], + "spans": [ + { + "bbox": [ + 105, + 248, + 269, + 262 + ], + "type": "text", + "content": "E Additional Training Details" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 104, + 277, + 506, + 301 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 277, + 506, + 301 + ], + "spans": [ + { + "bbox": [ + 104, + 277, + 506, + 301 + ], + "type": "text", + "content": "We provide a brief summary of our training recipes in Table 4. Besides, we have release the code in the supplementary materials which contained the full training configurations for re-implementation." + } + ] + } + ], + "index": 3 + }, + { + "type": "table", + "bbox": [ + 165, + 334, + 447, + 398 + ], + "blocks": [ + { + "bbox": [ + 162, + 316, + 448, + 328 + ], + "lines": [ + { + "bbox": [ + 162, + 316, + 448, + 328 + ], + "spans": [ + { + "bbox": [ + 162, + 316, + 448, + 328 + ], + "type": "text", + "content": "Table 4: A brief summary of training recipes of Qwen2.5 Base models." + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 165, + 334, + 447, + 398 + ], + "lines": [ + { + "bbox": [ + 165, + 334, + 447, + 398 + ], + "spans": [ + { + "bbox": [ + 165, + 334, + 447, + 398 + ], + "type": "table", + "html": "
1.5B-Math7B-Math3B7B14B
Number of generations77121212
Learning rate3e-73e-73e-73e-73e-7
Max completion length2048204810241024768
Batch size per GPU12111
", + "image_path": "bcab8cb78366e87f8dd8e4a498f6c241b7dd8324c9b304d720703a5b37b80b08.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "table_body" + } + ], + "index": 5 + }, + { + "bbox": [ + 104, + 433, + 353, + 448 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 433, + 353, + 448 + ], + "spans": [ + { + "bbox": [ + 104, + 433, + 353, + 448 + ], + "type": "text", + "content": "F Computational Cost of Semantic Clustering" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 104, + 462, + 506, + 594 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 462, + 506, + 594 + ], + "spans": [ + { + "bbox": [ + 104, + 462, + 506, + 594 + ], + "type": "text", + "content": "Given the number of responses sampled per question " + }, + { + "bbox": [ + 104, + 462, + 506, + 594 + ], + "type": "inline_equation", + "content": "G" + }, + { + "bbox": [ + 104, + 462, + 506, + 594 + ], + "type": "text", + "content": " (i.e., the group size) and the training dataset size " + }, + { + "bbox": [ + 104, + 462, + 506, + 594 + ], + "type": "inline_equation", + "content": "N" + }, + { + "bbox": [ + 104, + 462, + 506, + 594 + ], + "type": "text", + "content": ", the time complexity of the clustering process is " + }, + { + "bbox": [ + 104, + 462, + 506, + 594 + ], + "type": "inline_equation", + "content": "O(G^2 \\times N)" + }, + { + "bbox": [ + 104, + 462, + 506, + 594 + ], + "type": "text", + "content": ". In mathematical reasoning tasks, semantic clustering is implemented by regular expressions which do not involve notable computational cost. For natural reasoning tasks, we rely on an additional compact small language model. To evaluate the additional computational overhead introduced by semantic clustering in EMPO, we conducted comparative analyses of EMPO and GRPO in terms of total training duration and GPU memory utilization. The results of mathematical reasoning and natural reasoning are shown in Table 6, respectively. 
It is worthy to note that the 14B model experiments require slightly less computational time than the 7B model. This is because, in our 14B experiments, we reduced the batch size and maximum response length from 2 and 1024 to 1 and 768, respectively, compared to the 3B and 7B configurations. This adjustment was made to fit the limited GPU memory of one single " + }, + { + "bbox": [ + 104, + 462, + 506, + 594 + ], + "type": "inline_equation", + "content": "8 \\times \\mathrm{A}100" + }, + { + "bbox": [ + 104, + 462, + 506, + 594 + ], + "type": "text", + "content": " 80G machine." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 104, + 609, + 506, + 643 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 609, + 506, + 643 + ], + "spans": [ + { + "bbox": [ + 104, + 609, + 506, + 643 + ], + "type": "text", + "content": "Table 5: Comparison of total runtime (measured as " + }, + { + "bbox": [ + 104, + 609, + 506, + 643 + ], + "type": "inline_equation", + "content": "8 \\times" + }, + { + "bbox": [ + 104, + 609, + 506, + 643 + ], + "type": "text", + "content": " A100 GPU hours) and storage cost (measured by max total GPU memory (GiB) utilization) between GRPO and EMP0. The GPU Memory semantic cluster process requires minimal computation and storage." + } + ] + } + ], + "index": 8 + }, + { + "type": "table", + "bbox": [ + 181, + 647, + 430, + 708 + ], + "blocks": [ + { + "bbox": [ + 181, + 647, + 430, + 708 + ], + "lines": [ + { + "bbox": [ + 181, + 647, + 430, + 708 + ], + "spans": [ + { + "bbox": [ + 181, + 647, + 430, + 708 + ], + "type": "table", + "html": "
Qwen2.5-1.5B-MathQwen2.5-7B-Math
GPU HoursGPU MemGPU HoursGPU Mem
GRPO11.2240.48.5501.3
EMPO11.7208.28.7532.7
", + "image_path": "2f2bfbad48ec71e771da0d9c1a85dcef5bdda49c28e044cbd20eadabff19c212.jpg" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "table_body" + } + ], + "index": 9 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 300, + 741, + 312, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 741, + 312, + 750 + ], + "spans": [ + { + "bbox": [ + 300, + 741, + 312, + 750 + ], + "type": "text", + "content": "18" + } + ] + } + ], + "index": 10 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 17 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 127, + 109, + 482, + 168 + ], + "blocks": [ + { + "bbox": [ + 104, + 69, + 504, + 103 + ], + "lines": [ + { + "bbox": [ + 104, + 69, + 504, + 103 + ], + "spans": [ + { + "bbox": [ + 104, + 69, + 504, + 103 + ], + "type": "text", + "content": "Table 6: Comparison of total runtime (measured as " + }, + { + "bbox": [ + 104, + 69, + 504, + 103 + ], + "type": "inline_equation", + "content": "8 \\times" + }, + { + "bbox": [ + 104, + 69, + 504, + 103 + ], + "type": "text", + "content": " A100 GPU hours) and storage cost (measured by total GPU memory (GiB) utilization) between GRPO and EMP0. The GPU Memory semantic cluster process requires minimal computation and storage." + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 127, + 109, + 482, + 168 + ], + "lines": [ + { + "bbox": [ + 127, + 109, + 482, + 168 + ], + "spans": [ + { + "bbox": [ + 127, + 109, + 482, + 168 + ], + "type": "table", + "html": "
Qwen2.5-3BQwen2.5-7BQwen2.5-14B
GPU HoursGPU MemGPU HoursGPU MemGPU HoursGPU Mem
GRPO9.5274.812.4508.611.0588.2
EMPO11.1286.914.6532.711.5541.1
", + "image_path": "1099887c44ad2b598c5b47017a4cafdffed2a0d290e926bdf6596db1d87f0f65.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "table_body" + } + ], + "index": 1 + }, + { + "bbox": [ + 105, + 186, + 275, + 200 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 186, + 275, + 200 + ], + "spans": [ + { + "bbox": [ + 105, + 186, + 275, + 200 + ], + "type": "text", + "content": "G Details of Prompt Collection" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 104, + 210, + 506, + 266 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 210, + 506, + 266 + ], + "spans": [ + { + "bbox": [ + 104, + 210, + 506, + 266 + ], + "type": "text", + "content": "For mathematical reasoning, we directly use 20,000 prompts randomly selected from Numina-Math-CoT. For free-form natural reasoning tasks, we adopt the prompts from Natural Reasoning5 by filtering out the questions with over-long prompt, reference answer. Besides, we use the response length of Llama3.3-70B-Instruct as a difficulty estimation metric, and filter out overly difficult samples with response lengths exceeding 4096 tokens. The data collection python code is demonstrated as follow:" + } + ] + } + ], + "index": 3 + }, + { + "type": "code", + "bbox": [ + 110, + 296, + 438, + 488 + ], + "blocks": [ + { + "bbox": [ + 106, + 275, + 389, + 288 + ], + "lines": [ + { + "bbox": [ + 106, + 275, + 389, + 288 + ], + "spans": [ + { + "bbox": [ + 106, + 275, + 389, + 288 + ], + "type": "text", + "content": "Algorithm 4: Python code of data filtering in a huggingface-like style." 
+ } + ] + } + ], + "index": 4, + "angle": 0, + "type": "code_caption" + }, + { + "bbox": [ + 110, + 296, + 438, + 488 + ], + "lines": [ + { + "bbox": [ + 110, + 296, + 438, + 488 + ], + "spans": [ + { + "bbox": [ + 110, + 296, + 438, + 488 + ], + "type": "text", + "content": "from datasets import load_dataset \ndataset = load_dataset(\"facebook/Natural-Reasoning\") \nfiltered_dataset = dataset.filter( lambda x: (\n # no answer\n len(x[\"reference_answer\"])) > 0\n # over-long answer\n and len(x[\"reference_answer\"]} < 129\n # overly difficult questions\n and len(x[\"llamaresponses\"]} < 4096\n # over-long prompt\n and len(x[\"question\"]} < 512\n # proof-oriented\n and (\"prove\" not in x[\"question\"].lower())\n and (\"proof\" not in x[\"question\"].lower())\n)" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "code_body" + } + ], + "index": 5, + "sub_type": "code", + "guess_lang": "python" + }, + { + "bbox": [ + 105, + 512, + 294, + 525 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 512, + 294, + 525 + ], + "spans": [ + { + "bbox": [ + 105, + 512, + 294, + 525 + ], + "type": "text", + "content": "H Additional Result about Pass@k" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 104, + 536, + 504, + 570 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 536, + 504, + 570 + ], + "spans": [ + { + "bbox": [ + 104, + 536, + 504, + 570 + ], + "type": "text", + "content": "We provide additional visualization pass@k results of models trained with EMP0. The results are shown as follow. As shown in Figure H, the Base model consistently catch up with RL trained models when k is large." 
+ } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 105, + 585, + 462, + 599 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 585, + 462, + 599 + ], + "spans": [ + { + "bbox": [ + 105, + 585, + 462, + 599 + ], + "type": "text", + "content": "I The Influence of Clustering Quality on the Performance of EMPO" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 104, + 609, + 506, + 655 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 609, + 506, + 655 + ], + "spans": [ + { + "bbox": [ + 104, + 609, + 506, + 655 + ], + "type": "text", + "content": "In our mathematical reasoning experiments, semantic clustering is achieved solely through regular expression matching without introducing additional models. Due to the naturally structured response formats in mathematical tasks, regular expression could accurately determine answer equivalence, resulting in relatively high clustering quality." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 104, + 658, + 505, + 704 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 658, + 505, + 704 + ], + "spans": [ + { + "bbox": [ + 104, + 658, + 505, + 704 + ], + "type": "text", + "content": "However, in more general free-form natural reasoning tasks where model responses are free-form much more diverse (e.g., matrix, numbers, a few lines of sentences/codes...), the clustering quality can impact EMPO's effectiveness. For instance, in our more early practice, we tried DeBERTa (a bert-like model with 300M parameters trained by microsoft) for semantic clustering. 
Due to" + } + ] + } + ], + "index": 10 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 116, + 710, + 337, + 723 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 116, + 710, + 337, + 723 + ], + "spans": [ + { + "bbox": [ + 116, + 710, + 337, + 723 + ], + "type": "text", + "content": "5https://huggingface.co/datasets/facebook/natural_reasoning" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 300, + 741, + 311, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 741, + 311, + 750 + ], + "spans": [ + { + "bbox": [ + 300, + 741, + 311, + 750 + ], + "type": "text", + "content": "19" + } + ] + } + ], + "index": 12 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 18 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 111, + 76, + 294, + 194 + ], + "blocks": [ + { + "bbox": [ + 111, + 76, + 294, + 194 + ], + "lines": [ + { + "bbox": [ + 111, + 76, + 294, + 194 + ], + "spans": [ + { + "bbox": [ + 111, + 76, + 294, + 194 + ], + "type": "image", + "image_path": "2f1ddd8ff1e96a17be6f46c9ff2bb458e712e643653068eb46e98c12091fa5e9.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 104, + 217, + 299, + 239 + ], + "lines": [ + { + "bbox": [ + 104, + 217, + 299, + 239 + ], + "spans": [ + { + "bbox": [ + 104, + 217, + 299, + 239 + ], + "type": "text", + "content": "Figure 5: Trend of pass@k accuracy on Math test-set." 
+ } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "type": "image", + "bbox": [ + 317, + 76, + 500, + 194 + ], + "blocks": [ + { + "bbox": [ + 317, + 76, + 500, + 194 + ], + "lines": [ + { + "bbox": [ + 317, + 76, + 500, + 194 + ], + "spans": [ + { + "bbox": [ + 317, + 76, + 500, + 194 + ], + "type": "image", + "image_path": "4db2f4e321e46330bce0ead90d1173456c617c799def72823b107ed1cd146436.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 310, + 217, + 504, + 239 + ], + "lines": [ + { + "bbox": [ + 310, + 217, + 504, + 239 + ], + "spans": [ + { + "bbox": [ + 310, + 217, + 504, + 239 + ], + "type": "text", + "content": "Figure 6: Trend of pass@k accuracy on OMNI test-set." + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_caption" + } + ], + "index": 2 + }, + { + "bbox": [ + 104, + 258, + 506, + 336 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 258, + 506, + 336 + ], + "spans": [ + { + "bbox": [ + 104, + 258, + 506, + 336 + ], + "type": "text", + "content": "the poor quality of semantic clustering, our EMPO straggled to scale up and suffered from frequent reward hacking. Subsequently, by leveraging the general-verifier released by Tiger-Lab (a fine-tuned Qwen2.5-1.5B-Math model) for clustering, we successfully generalized EMPO to more general free-form reasoning tasks. Noted that even though this small language model undergoes supervised finetuning, it serves within our fully unsupervised framework as a fixed utility function for semantic comparison, rather than serving as an external supervisor for task-specific feedback. 
There are several fundamental difference between cluster model and the reward model used in supervised RL:" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 132, + 344, + 504, + 462 + ], + "type": "list", + "angle": 0, + "index": 8, + "blocks": [ + { + "bbox": [ + 132, + 344, + 504, + 388 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 344, + 504, + 388 + ], + "spans": [ + { + "bbox": [ + 132, + 344, + 504, + 388 + ], + "type": "text", + "content": "- The cluster model does not evaluate output correctness relative to input queries. It just provides pairwise comparisons between the model's own outputs. That is, it only provides binary answer about \"whether these two answers are the same?\" rather than \"which answer is better?\"." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 132, + 392, + 504, + 415 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 392, + 504, + 415 + ], + "spans": [ + { + "bbox": [ + 132, + 392, + 504, + 415 + ], + "type": "text", + "content": "- The cluster model does not provide any guidance, such as gradient information or hints on how to refine the reasoning traces." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 132, + 418, + 504, + 462 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 418, + 504, + 462 + ], + "spans": [ + { + "bbox": [ + 132, + 418, + 504, + 462 + ], + "type": "text", + "content": "- Compared to reward model or human-verifier golden answers, it can be much easier to implement such a cluster model. For example, in mathematical reasoning tasks, only regular expressions are enough for clustering. In natural reasoning tasks, a finetuned Qwen2.5-1B model can provide high quality semantic cluster results." 
+ } + ] + } + ], + "index": 7 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 104, + 470, + 506, + 515 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 470, + 506, + 515 + ], + "spans": [ + { + "bbox": [ + 104, + 470, + 506, + 515 + ], + "type": "text", + "content": "Essentially, this is related to the non-identifiability problem in statistical inference [44]. The issue of non-identifiability arises because multiple, distinct underlying states (potential \"truths,\" or more accurately, different reasoning pathways or different clusters of incorrect answers) could produce the same pattern of relational signals (i.e., the same semantic clustering results)." + } + ] + } + ], + "index": 9 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 299, + 741, + 312, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 741, + 312, + 750 + ], + "spans": [ + { + "bbox": [ + 299, + 741, + 312, + 750 + ], + "type": "text", + "content": "20" + } + ] + } + ], + "index": 10 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 19 + } + ], + "_backend": "vlm", + "_version_name": "2.6.4" +} \ No newline at end of file diff --git a/data/2025/2504_05xxx/2504.05862/f5f20bac-767d-4260-82e6-943416d1d631_content_list.json b/data/2025/2504_05xxx/2504.05862/f5f20bac-767d-4260-82e6-943416d1d631_content_list.json new file mode 100644 index 0000000000000000000000000000000000000000..54f3580d4925426e72071e9651633e671331d6c2 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05862/f5f20bac-767d-4260-82e6-943416d1d631_content_list.json @@ -0,0 +1,2173 @@ +[ + { + "type": "text", + "text": "Are Generative AI Agents Effective Personalized Financial Advisors?", + "text_level": 1, + "bbox": [ + 125, + 101, + 869, + 148 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Takehiro Takayanagi", + "bbox": [ + 143, + 157, + 316, + 175 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "takayanagi-takehiro590@g.ecc.u-", + "bbox": [ + 
119, + 175, + 344, + 189 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "tokyo.ac.jp", + "bbox": [ + 192, + 190, + 269, + 204 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "The University of Tokyo", + "bbox": [ + 148, + 205, + 313, + 219 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Tokyo, Japan", + "bbox": [ + 184, + 220, + 274, + 234 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Kiyoshi Izumi", + "bbox": [ + 441, + 157, + 557, + 174 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "izumi@sys.t.u-tokyo.ac.jp", + "bbox": [ + 411, + 175, + 586, + 189 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "The University of Tokyo", + "bbox": [ + 416, + 190, + 581, + 204 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Tokyo, Japan", + "bbox": [ + 454, + 205, + 544, + 219 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Javier Sanz-Cruzado", + "bbox": [ + 681, + 157, + 849, + 172 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "javier.sanz-", + "bbox": [ + 728, + 175, + 805, + 189 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "cruzadopuig@glasgow.ac.uk", + "bbox": [ + 671, + 190, + 864, + 204 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "University of Glasgow", + "bbox": [ + 692, + 205, + 843, + 219 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Glasgow, United Kingdom", + "bbox": [ + 678, + 220, + 856, + 234 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Richard McCreadie", + "bbox": [ + 285, + 246, + 442, + 261 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "richard.mccreadie@glasgow.ac.uk", + "bbox": [ + 251, + 263, + 478, + 277 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "University of Glasgow", + "bbox": [ + 287, + 279, + 439, + 292 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Glasgow, United Kingdom", + "bbox": [ + 274, + 294, + 452, + 308 + ], + "page_idx": 0 + }, + { + "type": "text", 
+ "text": "Abstract", + "text_level": 1, + "bbox": [ + 83, + 316, + 156, + 330 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Large language model-based agents are becoming increasingly popular as a low-cost mechanism to provide personalized, conversational advice, and have demonstrated impressive capabilities in relatively simple scenarios, such as movie recommendations. But how do these agents perform in complex high-stakes domains, where domain expertise is essential and mistakes carry substantial risk? This paper investigates the effectiveness of LLM-advisors in the finance domain, focusing on three distinct challenges: (1) eliciting user preferences when users themselves may be unsure of their needs, (2) providing personalized guidance for diverse investment preferences, and (3) leveraging advisor personality to build relationships and foster trust. Via a lab-based user study with 64 participants, we show that LLM-advisors often match human advisor performance when eliciting preferences, although they can struggle to resolve conflicting user needs. When providing personalized advice, the LLM was able to positively influence user behavior, but demonstrated clear failure modes. Our results show that accurate preference elicitation is key, otherwise, the LLM-advisor has little impact, or can even direct the investor toward unsuitable assets. More worryingly, users appear insensitive to the quality of advice being given, or worse these can have an inverse relationship. 
Indeed, users reported a preference for and increased satisfaction as well as emotional trust with LLMs adopting an extroverted persona, even though those agents provided worse advice.", + "bbox": [ + 81, + 335, + 483, + 669 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "CCS Concepts", + "text_level": 1, + "bbox": [ + 83, + 680, + 200, + 696 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "- Information systems $\\rightarrow$ Decision support systems; Personalization.", + "bbox": [ + 81, + 699, + 483, + 726 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Keywords", + "text_level": 1, + "bbox": [ + 83, + 738, + 168, + 753 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "large language models, financial advisor, user study, generative AI", + "bbox": [ + 81, + 757, + 480, + 771 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Permission to make digital or hard copies of all or part of this work for personal or classroom use is granted without fee provided that copies are not made or distributed for profit or commercial advantage and that copies bear this notice and the full citation on the first page. Copyrights for components of this work owned by others than the author(s) must be honored. Abstracting with credit is permitted. To copy otherwise, or republish, to post on servers or to redistribute to lists, requires prior specific permission and/or a fee. Request permissions from permissions@acm.org.", + "bbox": [ + 81, + 779, + 482, + 852 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "SIGIR 2025, July 13-18, 2018, Padua, Italy", + "bbox": [ + 84, + 852, + 277, + 863 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "© 2025 Copyright held by the owner/author(s). 
Publication rights licensed to ACM.", + "bbox": [ + 84, + 864, + 472, + 875 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "ACM ISBN 978-x-xxxxx-xxxxx-x/YY/MM", + "bbox": [ + 84, + 875, + 269, + 883 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "https://doi.org/10.1145/nnnnnnn.nnnnnnn", + "bbox": [ + 84, + 883, + 284, + 895 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Iadh Ounis", + "bbox": [ + 584, + 246, + 679, + 261 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "iadh.ounis@glasgow.ac.uk", + "bbox": [ + 542, + 263, + 722, + 277 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "University of Glasgow", + "bbox": [ + 557, + 277, + 707, + 292 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Glasgow, United Kingdom", + "bbox": [ + 544, + 294, + 720, + 308 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/4cee9a16e36b20686fedd91c3c5c33e55f7ed2b3b09d0c248c6e09e369f3f9f7.jpg", + "image_caption": [ + "Figure 1: Conceptual illustration of an LLM-advisor with two stages: (1) Preference Elicitation and (2) Advisory Discussion." + ], + "image_footnote": [], + "bbox": [ + 537, + 315, + 888, + 443 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "ACM Reference Format:", + "text_level": 1, + "bbox": [ + 514, + 501, + 661, + 513 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Takehiro Takayanagi, Kiyoshi Izumi, Javier Sanz-Cruzado, Richard McCreadie, and Iadh Ounis. 2025. Are Generative AI Agents Effective Personalized Financial Advisors?. In Proceedings of SIGIR 2025. ACM, New York, NY, USA, 11 pages. 
https://doi.org/10.1145/nnnnnnn.nnnnnnn", + "bbox": [ + 513, + 513, + 915, + 565 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "1 Introduction", + "text_level": 1, + "bbox": [ + 514, + 585, + 650, + 599 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Personalized advice plays a crucial role in our society, particularly in complex and high-stakes domains like healthcare and finance. Advisors and professionals in these fields use their expertise to offer personalized guidance and emotional support to their clients, leveraging people's specific preferences and/or circumstances. However, advisory services are often provided at a high cost, effectively excluding a large portion of the population from this critical advice. In the financial domain, to mitigate this issue, automated decision support systems have been widely studied, with a special focus on investment-related predictions, such as financial asset recommendations [30, 35].", + "bbox": [ + 511, + 604, + 915, + 756 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Recent advances in natural language processing and large language models (LLMs) have significantly accelerated the development of conversational agents, presenting the potential to function as personalized assistants for information-seeking and decision-making [41]. These agents can now leverage multi-turn dialogues, enabling dynamic, mixed-initiative interactions where both users and systems can take the lead in conversations [1]. 
This progression has expanded the application of conversational agents to various tasks, such as recommendation, question answering, and search [12, 27, 34, 41].", + "bbox": [ + 511, + 757, + 915, + 896 + ], + "page_idx": 0 + }, + { + "type": "aside_text", + "text": "arXiv:2504.05862v2 [cs.AI] 15 Apr 2025", + "bbox": [ + 22, + 270, + 60, + 705 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "The application of these conversational agents for financial decision-making represents a much more complex scenario than others like movie recommendations, because users are not necessarily familiar with the basic terminology and concepts in this space, and mistakes carry a substantial risk that can lead to large monetary losses. While there is a growing interest in building these conversational assistants to provide automated financial advice [21], previous work has mostly targeted agents capable of handling simple inquiries [18, 36, 37]. Compared to these simple systems, helping users navigate financial decisions and market uncertainties poses a much greater challenge. Therefore, it is not yet clear how to develop systems that effectively support complex financial information-seeking and decision-making tasks.", + "bbox": [ + 81, + 106, + 480, + 286 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "This work aims to close this gap by exploring the effectiveness of LLMs to act as personalized financial advisory agents. In particular, we focus on three problems: (a) eliciting investor preferences through interactive conversations, (b) providing personalized guidance to help users determine whether particular financial assets align with their preferences, and (c) leveraging the personality of the advisor to foster trust on the advisor.", + "bbox": [ + 81, + 286, + 480, + 383 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "First, the financial literature emphasizes that eliciting user preferences is central to delivering suitable advice [33]. 
However, it remains unclear whether current conversational technologies, particularly those powered by LLMs, can correctly elicit user preferences in specialized domains where users struggle to articulate their needs. Our work addresses this challenge in the context of financial services.", + "bbox": [ + 81, + 383, + 480, + 479 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Second, although personalization is widely regarded as important in the financial decision-support literature [30, 35], its value in a conversational setting remains uncertain. In particular, we explore whether tailoring dialogue around a user's profile and context improves financial decision-making. Additionally, we also explore how personalization influences user perceptions of the advisor, in terms of aspects like trust and satisfaction.", + "bbox": [ + 81, + 479, + 480, + 575 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Finally, in personalized advisory settings within high-stakes domains, the relationship and trust between the client and advisor play a crucial role [21]. Research on conversational agents suggests that agent personality significantly affects users' perceptions of the system [4, 32]. However, it remains unclear how an advisor's personality in the financial domain influences both the quality of users' financial decisions and their overall experience.", + "bbox": [ + 81, + 577, + 480, + 674 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "To summarize, in this paper, we explore the following questions:", + "bbox": [ + 99, + 674, + 480, + 688 + ], + "page_idx": 1 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- RQ1: Can LLM-advisors effectively elicit user preferences through conversation?", + "- RQ2: Does personalization lead to better investment decisions and a more positive advisor assessment?", + "- RQ3: Do different personality traits affect decision quality and advisor assessment?" 
+ ], + "bbox": [ + 109, + 702, + 478, + 781 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "To address these questions, we conduct a lab-based user study that explores the effectiveness of LLMs as interactive conversational financial advisors, on which we simulate realistic investment scenarios using investor narratives and stock relevance scores curated by financial experts. Figure 1 illustrates an example conversation with the advisor, divided into two stages: first, the LLM-advisor attempts to capture the investor preferences through conversation; in", + "bbox": [ + 81, + 799, + 480, + 896 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "the second stage, given an individual asset, the advisor provides information about it to the investor, including how the asset matches (or not) the investor's preferences. To answer the different questions, we compare different configurations of the LLM-advisor: first, we compare personalized vs. non-personalized advisors, and, then, we compare two personalized advisors with distinct personalities.", + "bbox": [ + 511, + 106, + 913, + 189 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "2 Related Work", + "text_level": 1, + "bbox": [ + 513, + 202, + 658, + 217 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "2.1 Personalization and Preference Elicitation", + "text_level": 1, + "bbox": [ + 513, + 222, + 901, + 237 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Information systems, especially those focused on search and recommendation benefit from personalization [16]. Specifically, personalization techniques play a crucial role in enhancing user experience [19, 25, 42]. Interactive approaches, such as conversational preference elicitation represent the frontier of personalization. This problem has received growing attention, as advances in generative AI now provide a functional mechanism to collect user preferences dynamically in a free-form manner [41]. 
This interactive approach can capture more diverse and targeted insights than static approaches like questionnaires [7, 12, 26, 27, 34]. Indeed, recent studies have proposed various methods for effective conversational preference elicitation [34, 43], as well as user studies on the perceived quality of this process in domains such as e-commerce, movies, fashion, books, travel, and restaurant recommendations [2, 8, 17, 26, 34, 46].", + "bbox": [ + 511, + 241, + 913, + 448 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "However, we argue that for some important domains, trying to directly collect preferences is insufficient. An implicit assumption of these studies is that if directly asked, the user will be able to accurately express their preferences. It is reasonable to expect that this assumption would hold for scenarios like movie recommendation; we can ask a user \"do you like horror movies?\" and expect a useful response. On the other hand, this will not hold for complex tasks, where the user lacks the knowledge to form an accurate response [12, 40]. For instance, in an investment context if we asked \"do you prefer ETFs or Bonds?\", it is not clear that an inexperienced user would be able to produce a meaningful answer. In these cases, an ideal agent needs to fill the gaps in the user knowledge through conversation, as well as infer the user preferences across multiple (often uncertain) user responses. But how effective are generative AI agents at this complex task? 
This paper aims to answer that question for the domain of financial advisory; a particularly challenging domain given its technical nature and high risks if done poorly.", + "bbox": [ + 511, + 449, + 913, + 684 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "2.2 Financial advisory", + "text_level": 1, + "bbox": [ + 513, + 696, + 709, + 713 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "In the financial domain, advisors help individuals manage their personal finances by offering guidance on investments and assisting with decision-making. While financial advisors can be beneficial, their services often come at a high cost, making them unaffordable for many people. To mitigate this issue, automated (nonconversational) financial decision support systems such as financial recommender systems have been widely studied [45]. The majority of research in this area has been focused on how to find profitable assets (i.e. those that will make money if we invest in them). These works assume a simplified user-model, where an investor is only concerned with maximizing return-on-investment over a fixed period of time [30, 35]. These studies frame financial advisory as a ranking problem, where the goal is to rank financial assets for a user", + "bbox": [ + 511, + 715, + 913, + 896 + ], + "page_idx": 1 + }, + { + "type": "header", + "text": "SIGIR 2025, July 13-18, 2018, Padua, Italy", + "bbox": [ + 84, + 75, + 281, + 87 + ], + "page_idx": 1 + }, + { + "type": "header", + "text": "Takayanagi et al.", + "bbox": [ + 828, + 75, + 911, + 85 + ], + "page_idx": 1 + }, + { + "type": "image", + "img_path": "images/386ffde58c6627098c75cf4e90481d55e10c3880e9b1334aba927ea8a1c86fd4.jpg", + "image_caption": [ + "Figure 2: Example of an investor profile, investment preferences, and ground truth ranking. Dashed line components are used for evaluation (and therefore, they are not shown to the user/LLM)." 
+ ], + "image_footnote": [], + "bbox": [ + 151, + 103, + 848, + 224 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "over a specified time period. However, a recent study suggests that a large part of the value offered by human financial advisors stems from their ability to personalize investment guidance to clients' specific needs, build relationships, and foster trust [15], rather than simply presenting suitable assets.", + "bbox": [ + 81, + 279, + 480, + 348 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Reflecting on these findings, the development of conversational financial advisors has drawn increasing attention, as it enables a dynamic understanding of users' needs, personalized guidance, and the potential to build trustworthy relationships [3, 9, 11, 18, 44]. In particular, the conversational agents' personality has gained attention as a factor that can help build relationships with clients and foster trust [21], especially given the successes of conversational agents using the Big Five personality model [23] to enhance the end-user experience [5, 33]. Although conversational agents show potential in finance, how to configure them to match the value of human advisors remains unclear. Therefore, we conduct a user study to examine how personalizing investment guidance and the advisor's personality shape users' financial decision-making effectiveness and overall user experience.", + "bbox": [ + 81, + 349, + 482, + 542 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "3 Methodology", + "text_level": 1, + "bbox": [ + 83, + 559, + 220, + 574 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "In this paper we aim to determine to what extent current generative language models can act as an effective financial advisor. 
Indeed, given the need to personalize for the user, emotional implications, the technical nature of the information-seeking task, and high impact if failed, we argue that this is an excellent test case for the limits of generative large language models. To structure our evaluation, we divide our study into two phases, as illustrated in Figure 1, where we evaluate the success of both:", + "bbox": [ + 81, + 577, + 482, + 688 + ], + "page_idx": 2 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "(1) Preference Elicitation: During this stage, we have the LLM-advisor hold a natural language conversation with a human, where it is directed to collect information regarding the person's investment preferences. The human in this interaction is pretending to have preferences from a given investor profile.", + "(2) Advisory Discussion: During the advisory discussion, the LLM-advisor again has a natural language conversation with the human (acting on an investor profile), where the human collects information about whether a company is a suitable investment for them. This is repeated for multiple companies per investor profile." + ], + "bbox": [ + 101, + 695, + 482, + 861 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "We provide preparatory information and discuss each stage in more detail below:", + "bbox": [ + 81, + 868, + 480, + 895 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "3.1 Investor Profiles", + "text_level": 1, + "bbox": [ + 514, + 277, + 692, + 291 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "To fairly evaluate the ability of any LLM-advisor, we need to have them interact with human users with real needs. Given the open-ended nature of free-form conversations, it is desirable to repeat each experiment with different people such that we can observe variances in conversation paths, as those variances may influence task success. 
However, to enable repeatability, we need to hold the investor needs constant across repetitions. Hence, we define three archetypal investor profiles $i \\in I$ based on input from a financial expert, where our human participants are given one to follow when conversing with the LLM-advisor:", + "bbox": [ + 511, + 296, + 913, + 434 + ], + "page_idx": 2 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- Investor 1: Growth-Oriented Healthcare Enthusiast: Prefers healthcare innovations, values high-growth opportunities, and takes measured risks.", + "- Investor 2: Conservative Income Seeker: Seeks stable returns, invests in well-established companies, values regular dividend payouts.", + "- Investor 3: Risk-taking Value Investor: Targets undervalued companies with strong long-term potential, tolerates short-term volatility, and invests in cyclical sectors." + ], + "bbox": [ + 540, + 440, + 913, + 565 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "For each of these investor profiles, we select three key investment preferences, chosen from well-known investment characteristics such as industry sector, stock style, consistency in dividend payments, and sensitivity to global market changes [10]. We denote the set of investor preferences as $i^{pref}$ . In our experiments, we simulate a realistic elicitation scenario where the advisor collects the preferences from the participants. Therefore, we do not straightforwardly provide the preferences to the participants. Instead, we present them as text narratives of between 150 to 200 words. A financial expert was consulted to confirm the quality and reliability of these narratives. 
An example narrative representing Investor 2 is illustrated in Figure 2, where we highlight the sentences referring to specific investor preferences.", + "bbox": [ + 511, + 570, + 913, + 751 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "3.2 Stage 1: Preference Elicitation", + "text_level": 1, + "bbox": [ + 514, + 765, + 802, + 780 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "The goal of stage 1 of our study is to determine to what extent an LLM-advisor can effectively collect a user's investment preferences through conversation. Formally, given a participant of the user study $u$ and an investor profile $i$ , during the elicitation stage, the LLM-advisor aims to obtain an approximated set of preferences, denoted $i_u^{LLM}$ , that matches the investor preferences ( $i^{pref}$ ). To achieve this, the generative model produces a series of questions that participants answer by interpreting the investor narrative.", + "bbox": [ + 511, + 784, + 913, + 896 + ], + "page_idx": 2 + }, + { + "type": "header", + "text": "Are Generative AI Agents Effective Personalized Financial Advisors?", + "bbox": [ + 83, + 75, + 405, + 85 + ], + "page_idx": 2 + }, + { + "type": "header", + "text": "SIGIR 2025, July 13-18, 2018, Padua, Italy", + "bbox": [ + 715, + 75, + 913, + 87 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Responses to those questions, denoted as $R_{i}^{u}$ , are used by the LLM-advisor to generate the user profile $i_{u}^{LLM}$ . Success is then measured by manually evaluating the overlap between $i^{pref}$ and $i_{u}^{LLM}$ .", + "bbox": [ + 81, + 106, + 480, + 151 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "For user elicitation, we adopted a System-Ask-User-Respond (SAUR) paradigm [43]. During the conversation, the advisor proactively inquires about the user's preferences given a set of target preferences (e.g., industry type, acceptable risk). 
After the human participant responds to a question, the LLM-advisor checks whether the collected preferences cover all of the target preferences. If the advisor is confident that they do, it ends the conversation and prompts the user to proceed to the next stage; otherwise, it continues asking follow-up questions in a loop.", + "bbox": [ + 81, + 151, + 482, + 276 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "3.3 Stage 2: Advisory Discussion", + "text_level": 1, + "bbox": [ + 83, + 287, + 359, + 303 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Stage 2 of our study investigates to what extent an LLM-advisor can provide the same benefits as a real human advisor when exploring investment options. Note that the goal here is not to have the LLM-advisor promote any one asset, but rather to provide accurate and meaningful information such that the human can find the best investment opportunity for them. To this end, we structure our experiment such that the human (acting on an investor profile) has one conversation with the LLM-advisor for each of a set of assets being considered.1 After all assets are presented to the participant, a stock ranking is generated by sorting the stocks by the participant rating in descending order.", + "bbox": [ + 81, + 305, + 482, + 458 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Importantly, as we know the investor profile $i^{pref}$ for each conversation about an asset $a$ , we can objectively determine whether $a$ is a good investment given $i^{pref}$ , forming a ground truth against which we can compare to the rating provided by our human participant after their conversation with the LLM-advisor. For each asset $a$ , a financial expert produced a score between 0 and 3 by manually checking whether $a$ satisfied each of the three investment criteria contained in $i^{pref}$ . A ground-truth ranking was produced by sorting the assets by the expert scores. 
We show an example of the ranking construction in Figure 2. During evaluation, the closer the participant ranking is to the ranking produced by expert judgments, the better the LLM-advisor performed.", + "bbox": [ + 81, + 458, + 482, + 626 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Baseline Prompt: As we are working with an LLM-advisor and the nature of financial information-seeking is time-sensitive, we need to provide any information that might change over time to the LLM within the prompt. As such, for each asset $a$ , we pre-prepared a standard asset descriptor block after consulting with a financial expert, containing:", + "bbox": [ + 81, + 633, + 482, + 717 + ], + "page_idx": 3 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "Stock Prices: We collect monthly stock prices from 2023 using Yahoo! Finance.2", + "- Business Summary: We gather each company's business overview from Yahoo! Finance.", + "- Recent Performance and Key Financial Indicators (e.g., EPS): We obtain earnings conference call transcripts3 from Seeking Alpha for the last quarter of 2023." + ], + "bbox": [ + 109, + 719, + 480, + 816 + ], + "page_idx": 3 + }, + { + "type": "image", + "img_path": "images/fd66e0a7cc774da4caaecdf2f74a7c28e16bf0343f962b2806eee36f86cbf795.jpg", + "image_caption": [ + "Figure 3: User study structure." + ], + "image_footnote": [], + "bbox": [ + 527, + 104, + 897, + 323 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "The advisor using this prompt acts as our baseline for the advisory discussion study. We augment this baseline with additional context and instructions to form two additional experimental scenarios, discussed below:", + "bbox": [ + 513, + 356, + 913, + 411 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "+Personalization: As discussed earlier, one of the core roles of the financial advisor is to personalize to the individual customer, based on their financial situation, needs, and preferences. 
To enable the LLM-advisor to personalize for the user, we integrate the generated profile from the preference elicitation (Stage 1) $i_u^{LLM}$ into the prompt. We represent each preference as a series of short sentences.", + "bbox": [ + 511, + 419, + 913, + 503 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "+Personality: In Section 2.2 we discussed how human financial advisors provide emotional support as well as financial advice. While it is unlikely that an LLM-advisor could do this as well as a human (it lacks both emotional intelligence and non-conversational clues to the customer's mental state [39]) it might be possible to provide a better end-user experience by directing the LLM-advisor to adopt a personality. As noted in Section 2 it is possible to do this via prompt engineering, such as instructing the LLM to take on the traits of one or more of the Big-Five personality types [23].", + "bbox": [ + 511, + 508, + 913, + 633 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "As we are performing a user study with humans, it would be impractical to exhaustively test every combination of personality types, hence as an initial investigation we experiment with two distinct personality profiles [32]:", + "bbox": [ + 511, + 633, + 911, + 689 + ], + "page_idx": 3 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- Extroverted: High in extroversion, agreeableness, and openness; low in conscientiousness and neuroticism.", + "- Conscientious: Low in extroversion, agreeableness, and openness; high in conscientiousness and neuroticism." + ], + "bbox": [ + 540, + 693, + 913, + 747 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "We adopted the prompting method from Jiang et al. (2024) to assign a Big Five personality trait to the LLM agent [14], choosing it for its simplicity and effectiveness among various proposed approaches for embedding personality in LLMs (including both prompting and fine-tuning) [13, 14, 31]. 
To ensure a high standard of professionalism and accurate representation of the intended personality, we consulted financial professionals to review the texts generated by LLMs adopting both personas.", + "bbox": [ + 511, + 750, + 913, + 862 + ], + "page_idx": 3 + }, + { + "type": "header", + "text": "SIGIR 2025, July 13-18, 2018, Padua, Italy", + "bbox": [ + 84, + 75, + 282, + 85 + ], + "page_idx": 3 + }, + { + "type": "header", + "text": "Takayanagi et al.", + "bbox": [ + 828, + 75, + 911, + 87 + ], + "page_idx": 3 + }, + { + "type": "page_footnote", + "text": "These were manually selected, however in a production environment these might be produced by an asset recommendation system.", + "bbox": [ + 81, + 824, + 482, + 845 + ], + "page_idx": 3 + }, + { + "type": "page_footnote", + "text": "2The scenario for the financial advising of our user study is set to December 30, 2023. By basing our experiment at the end of 2023, we avoid the problem of data contamination [28].", + "bbox": [ + 81, + 845, + 482, + 877 + ], + "page_idx": 3 + }, + { + "type": "page_footnote", + "text": "3Earnings conference calls, hosted by publicly traded companies, discuss key aspects of their earnings reports and future goals with financial analysts and investors, thus covering critical financial indicators and recent performance insights [24]. These", + "bbox": [ + 81, + 876, + 480, + 909 + ], + "page_idx": 3 + }, + { + "type": "page_footnote", + "text": "transcripts cover significant financial indicators and provide explanations of recent performance.", + "bbox": [ + 513, + 872, + 913, + 896 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "3.4 Experimental Design", + "text_level": 1, + "bbox": [ + 83, + 104, + 299, + 121 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "In our experiment, we conducted two studies: a personalization study (for RQ2) and an advisor persona study (for RQ3). 
In the personalization study, participants compared a non-personalized (Baseline) advisor with a personalized (+Personalized) version. In the advisor persona study, they compared different LLM-advisor personality types (+Extroverted vs. +Conscientious). Participants are randomly assigned to one of these two studies.", + "bbox": [ + 81, + 125, + 480, + 220 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Figure 3 shows the structure of our user study for a single participant, comprising seven steps:", + "bbox": [ + 83, + 220, + 482, + 250 + ], + "page_idx": 4 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "(1) Participant Training: Participants are given a general overview of the user study and given instructions on their expected roles during preference elicitation, advisory discussions, asset ranking, and advisor assessment.", + "(2) Investor Profile Allocation: The user $u$ is randomly allocated one of the investor profiles (See Section 3.1) that they will follow. Each profile is assigned to 42 participants.", + "(3) Preference Elicitation (Stage 1): The participant interacts with the LLM-advisor as if they were a new investor. The conversation ends once the LLM-advisor determines that they know enough about the investor to personalize for them. The median time spent on preference elicitation was 5 minutes and 11 seconds.", + "(4) Response Summarization: Given the aggregator of user responses $R_{i}^{u}$ , we instruct an LLM to generate an investor profile $i_{u}^{LLM}$ . For each investor preference in $i^{pref}$ , if there is any relevant information in the responses $R_{i}^{u}$ , that information is included in $i_{u}^{LLM}$ . Otherwise, $i_{u}^{LLM}$ indicates that no relevant information is available for that specific preference.", + "(5) Advisory Discussion (Stage 2): To simplify the conversation flow we have the participant hold separate conversations with the LLM-advisor for each asset they might invest in. 
The LLM-advisor is provided with context about the current asset (see Section 3.3), and depending on the experimental scenario, optionally personalization information (step 4 output) and/or a target personality context statement. Each conversation continues until the user is satisfied that they have enough information to rate the asset. The order in which the assets are discussed is randomly assigned to avoid position bias.", + "(6) Asset Ranking and Feedback: Participants rank all the stocks (four in total) discussed in the advisory session according to their desire to invest in each. They also assess the advisor they interacted with using a 7-point Likert scale for the items listed in Table 1 (see Section 4)." + ], + "bbox": [ + 99, + 263, + 503, + 753 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "To enable more effective pair-wise comparison of LLM-advisor variants, we have each participant test two variants per study. If the user has only tested one variant at this point, then they repeat the user study (starting at step 2) with the second variant. The order in which participants experience each variant is randomly assigned.", + "bbox": [ + 81, + 768, + 482, + 839 + ], + "page_idx": 4 + }, + { + "type": "table", + "img_path": "images/8d963780ab0d5919fae3ad330b87890337b19b348fbed38c53dc36c9ddfcde88.jpg", + "table_caption": [ + "Table 1: Operational definitions used in the advisor assessment questionnaire for all response dimensions." + ], + "table_footnote": [], + "table_body": "
Response DimensionOperational Definition
Perceived Personalization [16]The advisor understands my needs.
Emotional Trust [16]I feel content about relying on this advisor for my decisions.
Trust in Competence [16]The advisor has good knowledge of the stock.
Intention to Use [16]I am willing to use this advisor as an aid to help with my decision about which stock to purchase.
Perceived Usefulness [25]The advisor gave me good suggestions.
Overall Satisfaction [25]Overall, I am satisfied with the advisor.
Information Provision [38]The advisor provides the financial knowledge needed.
", + "bbox": [ + 517, + 145, + 913, + 262 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "In our experiments, we use Llama-3.1 8B as the background model for all our LLM-advisor variants. $^4$", + "bbox": [ + 513, + 273, + 911, + 301 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "3.5 Participants", + "text_level": 1, + "bbox": [ + 514, + 314, + 658, + 330 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "We recruited 64 participants from the authors' affiliated university for our study: 32 participants for the personalization study and 32 participants for the advisor persona study, utilizing the university's online platform and blackboard for recruitment. Participants were required to be fluent in English, over 18 years old, and have an interest in finance and investment, mirroring the target demographic of our system's users. After excluding invalid data, 29 participants remained in the personalization study and 31 in the advisor persona study. We conducted a power analysis using the Wilcoxon signed-rank test for matched pairs, with the experimental conditions as the independent variable and users' response to the advisor assessment questionnaire as the dependent variable [29]. The analysis determined that 29 participants are needed to observe a statistically significant effect on user-perceived quality. 
Our recruitment criteria and compensation (£10/hour) for approximately one hour of participation were approved by our organization's ethical board.", + "bbox": [ + 511, + 332, + 915, + 554 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "4 Evaluation Metrics and Statistics", + "text_level": 1, + "bbox": [ + 514, + 566, + 810, + 580 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "In this section we discuss how we quantify effectiveness for the preference elicitation and advisory discussion stages, respectively, in addition to summarizing dataset statistics for each.", + "bbox": [ + 511, + 584, + 913, + 627 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "4.1 Preference Elicitation Metrics (Stage 1)", + "text_level": 1, + "bbox": [ + 514, + 638, + 872, + 655 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "To evaluate the quality of the first preference elicitation stage, we want to measure how well the LLM-advisor has captured the investor preferences as defined in the investor profile $i$ (see Section 3.1). Each investor profile $i \\in I$ defines key features of the investor, such as preferring high-growth stocks, or favoring regular payouts, denoted $i^{pref}$ . We have three investor profiles ( $|I| = 3$ , with $10(n)$ participants performing elicitation on $i_u^{LLM}$ for each profile and each LLM variant, i.e. there are 120 elicitation attempts in total, with 30 attempts per LLM-advisor variant. Following the notation in Section 3, $i_u^{LLM}$ in this case denotes a similar list of features to $i^{pref}$ that LLM-advisor learned about the investor during conversation with a participant $u$ , which we derive from a manual analysis of the elicitation output (i.e. what is produced by response summarization). 
Intuitively, the closer the features produced from", + "bbox": [ + 511, + 657, + 913, + 853 + ], + "page_idx": 4 + }, + { + "type": "header", + "text": "Are Generative AI Agents Effective Personalized Financial Advisors?", + "bbox": [ + 83, + 75, + 405, + 87 + ], + "page_idx": 4 + }, + { + "type": "header", + "text": "SIGIR 2025, July 13-18, 2018, Padua, Italy", + "bbox": [ + 715, + 75, + 911, + 87 + ], + "page_idx": 4 + }, + { + "type": "page_footnote", + "text": "(7) Exit Questionnaire: Once a pair of LLM-advisor variants have been tested, the user fills in an exit questionnaire that is designed to ask the overall experience in the user study.", + "bbox": [ + 101, + 854, + 482, + 896 + ], + "page_idx": 4 + }, + { + "type": "page_footnote", + "text": "Further details about the LLM configuration, investor narratives, relevant scores, prompts and scripts for data analysis can be accessed at the following repository: https://github.com/TTsamurai/LLMAdvisor_supplement", + "bbox": [ + 513, + 862, + 915, + 896 + ], + "page_idx": 4 + }, + { + "type": "table", + "img_path": "images/a6b48ac0208aa2ebad6a9884585dd0814d5926805bfbac9e8cc672c73a856a67.jpg", + "table_caption": [ + "Table 2: General statistics of the collected conversation data." + ], + "table_footnote": [], + "table_body": "
Participants60
Time Period2024/10/24 ~ 2024/11/7
Total Turns10,008
Stage 1: Preference Elicitation
Total Turns1,788
Number of Sessions120
Avg. Turns/Session15.8
Avg. User Words/Turn9.8
Stage 2: Advisory Discussion
Total Turns8,220
Number of Sessions480
Avg. Turns/Session18.2
Avg. User Words/Turn13.0
", + "bbox": [ + 114, + 130, + 450, + 318 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "any elicitation attempt $i_{u}^{LLM}$ is to $i^{pref}$ , the better the LLM-advisor is performing. To this end, we report elicitation accuracy for each investor profile, calculated as:", + "bbox": [ + 81, + 330, + 482, + 375 + ], + "page_idx": 5 + }, + { + "type": "equation", + "text": "\n$$\n\\text {E l i c i t a t i o n A c c u r a c y} (i) = \\frac {1}{n} \\sum_ {j = 1} ^ {n} \\frac {\\left| i _ {j} ^ {L L M} \\cap i ^ {p r e f} \\right|}{\\left| i ^ {p r e f} \\right|} \\tag {1}\n$$\n", + "text_format": "latex", + "bbox": [ + 140, + 378, + 482, + 424 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Human Advisor: To provide a point of comparison, we also conduct a preference elicitation with a financial expert using the same prompt and instructions as the LLM. This allows us to evaluate how close LLMs are to a paid human advisor undertaking the same task. More specifically, for each investor profile, three participants engaged with this expert, who then produced a set of preferences $i_u^{Expert}$ , which can be used instead of $i_u^{LLM}$ in Equation 1.", + "bbox": [ + 81, + 436, + 482, + 537 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "4.2 Advisory Effectiveness Metrics (Stage 2)", + "text_level": 1, + "bbox": [ + 83, + 547, + 449, + 565 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Ranking correlation (Spearman's Rho): In the second stage, we evaluate how well the LLM-advisor can support an investor to select financial assets that are suitable for them to invest in. Recall from Figure 3 that after a participant finishes discussing all assets with the LLM-advisor, they rank those assets $a \\in A_i$ based on the likelihood they will invest in each, i.e. each participant $u$ acting on a profile $i$ we have an asset ranking $R(A_i, i_u)$ . 
As illustrated in Figure 2, each investor profile $i$ was derived from a ground truth set of investor preferences $i^{pref}$ , which an expert used to create a ground truth ranking $R(A_i, i^{pref})$ , i.e. the \"correct\" ranking of assets. Intuitively the closer the $R(A_i, i_u)$ is to $R(A_i, i^{pref})$ , the better the advisor is performing, as the participant was better able to distinguish suitable assets vs. unsuitable ones. Hence, to evaluate the effectiveness of the advisory task, we report the mean ranking correlation (Spearman's Rho) between $R(A_i, i_u)$ and $R(A_i, i^{pref})$ across participants $u$ for each LLM-advisor.", + "bbox": [ + 81, + 566, + 482, + 792 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Advisor Assessment Questionnaire: Lastly, we also gather qualitative data from each participant via a questionnaire. In particular, after ranking assets each participant, reports how they feel the LLM-advisor performed in terms of 7 dimensions, listed in Table 1, such as perceived usefulness, trust, and user satisfaction. We use this data later to evaluate how sensitive the user is to differences in the LLM-advisor.", + "bbox": [ + 81, + 797, + 482, + 896 + ], + "page_idx": 5 + }, + { + "type": "table", + "img_path": "images/d0bf440a8e47e435aae0fc2e57b40bd06a3e25f2e13d643069bc8f0b44f7c2b6.jpg", + "table_caption": [ + "Table 3: Stage 1 - Comparison of Elicitation Accuracy of an expert vs. different LLM-advisors for each investor profile. The best advisor is highlighted in bold. Arrows denote percentage increases $(\\uparrow)$ or decreases $(\\downarrow)$ compared to the expert." + ], + "table_footnote": [], + "table_body": "
Investor ProfileExpertLLM-Advisors
LLM+Extr.+Cons.Average
Growth-Oriented0.780.760.800.790.78→0.0%
Conservative-Income0.890.820.750.870.82↓7.8%
Risk-Taking0.890.480.600.550.53↓40.5%
Average0.850.690.700.730.70↓17.6%
", + "bbox": [ + 517, + 172, + 913, + 268 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "4.3 Dataset Statistics", + "text_level": 1, + "bbox": [ + 514, + 272, + 697, + 286 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Table 2 summarizes the statistics of the data collected during the two stages of our user study. Each conversation that a participant had with an LLM-advisor in either stage 1 or 2 is referred to as a session, e.g. during Stage 1, there were 3 investor profiles * 10 participants * 4 LLM-advisors, resulting in 120 sessions. Stage 2 has 4x the number of sessions, as there are four assets associated with each profile ( $A_i$ ) to discuss with the LLM-advisor.", + "bbox": [ + 511, + 290, + 913, + 387 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "From Table 2 we observe that in contrast to other conversational tasks [36, 37], financial information-seeking appears to require more extended interactions. On average, preference elicitation involves 15 turns per session with 9.8 words per turn, whereas advisory discussions involve 18 turns per session with 13.0 words per turn, highlighting the overall complexity of the task.", + "bbox": [ + 511, + 387, + 913, + 470 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "5 Results", + "text_level": 1, + "bbox": [ + 514, + 481, + 607, + 496 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "In this work, we explore how to design conversational financial advisors that enhance both decision-making and positive experience. 
To achieve this, our user study is guided by 3 core research questions.", + "bbox": [ + 513, + 500, + 915, + 542 + ], + "page_idx": 5 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- RQ1: Can LLM-advisors effectively elicit user preferences through conversation?", + "- RQ2: Does personalization lead to better decisions and more positive advisor assessment?", + "- RQ3: Do different personality traits affect decision quality and advisor assessment?" + ], + "bbox": [ + 540, + 544, + 911, + 626 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "5.1 RQ1: Elicitation accuracy", + "text_level": 1, + "bbox": [ + 514, + 642, + 764, + 657 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "We begin by examining how effective the LLM-advisors are at identifying investment preferences during conversations in Stage 1. Elicitation Accuracy is the primary metric, where we contrast the mean accuracy across 10 sessions in comparison to a human expert tackling the same task (see Section 4.1). Table 3 reports elicitation accuracy for each LLM-advisor and the Human Expert across investment profiles. Arrows denote percentage increases $(\\uparrow)$ or decreases $(\\downarrow)$ of the LLM-advisor compared to the expert.", + "bbox": [ + 511, + 660, + 913, + 771 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "To set expectations, we first consider the performance of the expert in the first column in Table 3, as we might expect, the expert maintains consistently high performance across all profiles, averaging $85\\%$ accuracy (random accuracy is $50\\%$ ). This forms an expectation of the performance ceiling for the task.", + "bbox": [ + 511, + 771, + 913, + 840 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Next, we compare the expert performance to each LLM-advisor. 
From the perspective of preference elicitation, there are three LLM-advisor configurations: those that use only the Baseline Prompt (denoted LLM) from the personalization study, and those that include",
The LLM is a probabilistic token generator conditioned on the baseline prompt and prior conversation, and as a result, in some scenarios, the contextual content can override a statement by the investor. This type of error is more likely when the investor is unsure in their responses or when they provide contradictory statements. For instance, an investor might express an interest in the consumer discretionary sector while simultaneously opting for non-cyclical stocks, despite consumer discretionary being inherently cyclical.",
Hence, in this section, we compare the performance of an LLM-advisor using only the Baseline Prompt to one that includes the preferences obtained during stage 1 (+Personalized). However, as we observed that preference elicitation is not always successful, we also examine what effect elicitation performance has on the LLM-advisor.", + "bbox": [ + 81, + 657, + 482, + 809 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "5.2.1 Non-personalized Decision-making Effectiveness: We initially establish how effective the LLM-advisor is without any information regarding the investor. LLM-advisor effectiveness is measured", + "bbox": [ + 81, + 818, + 482, + 859 + ], + "page_idx": 6 + }, + { + "type": "table", + "img_path": "images/c4b270fbbcb62fddb142afbc026f33c4b10b865d46fa8bd61bc31254260efa7f.jpg", + "table_caption": [ + "Table 4: Investor decision-making effectiveness, expressed as the Spearman's Rho correlation between the investor's asset ranking and the expert asset ranking (higher is better). $\\dagger$ indicates statistical improvements (Welch's t-test with $p<0.05$ ) over the not personalized baseline, while $\\S$ indicates significant differences between cases with successful and unsuccessful preference elicitations." + ], + "table_footnote": [], + "table_body": "
Advisor ConfigInvestor vs. Expert (Spearman's Rho)
PersonalizationPersonalityAllPreference Elicitation
SuccessfulUnsuccessful
BaselineNone0.110--
+PersonalizedNone0.3100.481†§-0.228
+Personalized+Extroverted0.1220.243§-0.286
+Personalized+Conscientious0.260.365-0.025
", + "bbox": [ + 516, + 215, + 911, + 311 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "based on how well the investor was able to rank the assets discussed by suitability to them. The primary metric is average Spearman's Rho correlation between the investor ranking and the ground truth ranking (see Section 4.2), reported in Table 4 row 1. As we expect, baseline advisory performance is low, with only a very weak positive correlation to the ground truth ranking of 0.11. This indicates that without further evidence, the LLM is not able to meaningfully guide the investor.", + "bbox": [ + 511, + 318, + 913, + 429 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "5.2.2 Personalized Decision-making Effectiveness: Having established our baseline, we now examine the impact that adding the investor preferences collected during stage 1 has, comparing Table 4 row 1 (baseline) to row 2 (personalized). As we anticipated, personalization is beneficial, with investor decision-making effectiveness increasing from 0.11 to 0.31 (average Spearman's Rho correlation to the expert ranking). However, this correlation is still weak, illustrating that while discussing assets with the LLM-advisor is better than no help at all, our participants are still struggling to evaluate the suitability of financial assets.", + "bbox": [ + 511, + 436, + 915, + 575 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "This correlation is an average over all the participants in the user study, regardless of how effective their preference elicitation was in stage 1. Hence, we might ask whether the low correlation is due to the LLM-advisor being confused by poor preference elicitation data. To explore this, Table 4 also reports investor decision-making effectiveness stratified based on whether stage 1 was successful (column 4) or not (column 5). 
As expected, we see a statistically significant increase in investor decision-making effectiveness when preference elicitation was successful, compared to non-personalized sessions (0.481 vs. 0.110). More concerningly, we also see the LLM-advisor has a strong negative influence on the investors' decision-making capability if preference elicitation fails, as illustrated by the negative correlations with the expert in column 5. This result highlights not only that effective preference elicitation is crucial, but also that the LLM-advisor can easily influence the investor into making poor decisions, as the human is heavily reliant on the agent to navigate the relatively unfamiliar financial information space.",
participant users' response to advisor assessment questionnaire under different advisor conditions. Columns labeled with advisor condition (Baseline, +Pers., +Cons., +Extr.) contain a 7-point Likert scale (higher is better). \"p\" column contains Wilcoxon signed-rank test $p$ -values for (RQ2) Baseline vs. +Personalized (Pers.), and (RQ3) +Conscientious (Cons.) vs. +Extroverted (Extr), for both the full data (All) and the subset where the elicitation accuracy is above 0.5. \"Successful Elicitation\" refers to the subset where elicitation accuracy was ≥ 0.5. For RQ2, this subset consists of pairs for which +Pers elicitation is successful, while for RQ3, it consists of pairs for which both +Extr and +Cons elicitation are successful. Boldface indicates significant effects with † for $p < 0.1$ and ‡ for $p < 0.05$ ." + ], + "table_footnote": [], + "table_body": "
Response Dimension(RQ2) Baseline vs. +Personalized(RQ3) +Conscientious vs. +Extroverted
AllSuccessful ElicitationAllSuccessful Elicitation
Baseline+Pers.pBaseline+Pers.p+Cons.+Extr.p+Cons.+Extr.p
Perceived Personalization5.7595.7240.8385.7625.9050.7515.5005.5000.6635.5885.7060.941
Emotional Trust5.1035.2410.4465.1435.3330.5375.0385.1540.6004.7065.2350.034‡
Trust in Competence5.6905.6900.8175.8105.8570.7825.9626.0770.5386.0006.0001.000
Intention to Use5.3105.4830.5055.4295.7140.1664.8855.4620.005‡4.9415.5880.013‡
Perceived Usefulness5.2415.5170.1835.3815.8100.1945.4235.5380.4255.1765.1180.968
Overall Satisfaction5.3455.6900.1165.4295.8100.098†5.2695.5770.1795.1185.5290.244
Information Provision5.5175.9660.026‡5.7146.1430.053†5.6925.6540.9535.5885.7650.490
", + "bbox": [ + 125, + 207, + 874, + 345 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "alter the decision-making of the investor/participant. But can the participant tell the differences between them?", + "bbox": [ + 81, + 364, + 480, + 392 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Table 5 reports the aggregation of the qualitative data we collected from each participant after they finished interacting with each LLM-advisor in terms of 7 dimensions, where we start by focusing on the RQ2-All columns, i.e. comparing the baseline and personalized variants. The important observation to note here is that the participant preference scores for both variants are statistically indistinguishable, except under the quality of information provision criteria. This means that our participants cannot tell if the LLM-advisor is personalizing to them, and trust the worse agent just as much as the better one. Furthermore, if we consider the best case scenario where the preference elicitation was successful (RQ2 Successful Elicitation columns) we observe the same pattern, even though the difference between the baseline and the personalized variants in terms of the effect it has on the participant decision-making is more pronounced. This underlines one of the core risks of using LLM-advisors in the financial domain; since our users are inherently inexpert they lack the fundamental skills to judge to what extent the LLM is providing good advice, meaning that there is no safety net if the LLM makes a mistake.", + "bbox": [ + 81, + 393, + 482, + 655 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "To answer RQ2, our results show that a personalized LLM-advisor is able to provide useful financial advice when it has accurate information regarding the preferences of the investor. This is demonstrated by better decision-making capability by participants using the personalized advisor in comparison to the non-personalized one. 
However, we also identified two important challenges to adoption. First, the impact the LLM-advisor has is strongly tied to the quality of the preference elicitation data provided, where poor preference elicitation will cause the agent to actively direct the investor to the wrong assets. Second, while the participants were positive regarding the LLM-advisors across all questionnaire criteria, they were not able to consistently tell the difference between good and bad advisors, leading to an increased risk of humans acting on bad advice.",
While we could consider the personalized LLM-advisor discussed in Section 5.2 as a third distinct personality (the base personality of the LLM), we shall not compare it with our personality-injected models, because different sets of participants were used in the personalization study and the advisor-persona study.",
0.26).", + "bbox": [ + 511, + 683, + 913, + 820 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "These results highlight that providing different personalities to an LLM-advisor can notably impact the capacity of the advisor to provide useful information to the investors.", + "bbox": [ + 513, + 821, + 913, + 862 + ], + "page_idx": 7 + }, + { + "type": "header", + "text": "SIGIR 2025, July 13-18, 2018, Padua, Italy", + "bbox": [ + 84, + 75, + 281, + 87 + ], + "page_idx": 7 + }, + { + "type": "header", + "text": "Takayanagi et al.", + "bbox": [ + 828, + 75, + 911, + 87 + ], + "page_idx": 7 + }, + { + "type": "page_footnote", + "text": "Refer to Section 3.3 for a full description of each personality.", + "bbox": [ + 514, + 883, + 805, + 895 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "5.3.2 Participant Assessment of the Advisor: We have observed so far that the use of different personalities affects the user decision-making process. But how do these personalities affect the perception that users have of the LLM-advisor? We observe this in Table 5, in terms of the seven dimensions captured during the advisor assessment questionnaire.", + "bbox": [ + 86, + 107, + 480, + 189 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "We first look at the RQ3-All columns, comparing the two personalities. Notably, for the majority of the dimensions, users barely distinguish between both systems. The only answer where we observe a statistically significant difference is the intention to use the system in the future. Surprisingly, despite providing worse guidance to the investor, participants expressed a higher interest in using the extroverted advisor than the conscientious one. 
When we limit our study to those participants who experienced a successful preference elicitation in both advisor variants, this issue is accentuated, as those users also develop a significantly greater emotional trust in the extroverted advisor.",
Figure 4 shows that extroverted advisors tend to use more positive language in their interactions, while conscientious advisors prefer negative and uncertain tones. Through manual analysis of the conversation, we observe that this results in the extroverted advisor focusing on the positive aspects of investments while overlooking serious drawbacks, whereas the conscientious advisor provides a more balanced view of the assets. Because of this, participants guided by conscientious advisors may make more well-informed financial decisions. Meanwhile, the positivity of the extroverted advisor seems more appreciated by the users, which is reflected in higher advisor assessment scores from the post-discussion questionnaire.", + "bbox": [ + 86, + 602, + 480, + 809 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "To answer RQ3, our results show that different personalities of a personalized LLM-advisor can affect the utility of the provided advice. This is demonstrated by the better decisions of the study", + "bbox": [ + 86, + 816, + 480, + 857 + ], + "page_idx": 8 + }, + { + "type": "image", + "img_path": "images/709d4faf5630cdd0daf077acd764d4a59ec9401ffa6ebd54c3c945fdcecc8bbc.jpg", + "image_caption": [ + "Figure 4: Average sentiment scores by advisor personality (extroverted in light blue and conscientious in pastel orange) and category (Positive, Negative, and Uncertainty). Error bars indicate the standard deviation." + ], + "image_footnote": [], + "bbox": [ + 547, + 104, + 879, + 243 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "participants when using an advisor with a conscientious personality than when using an advisor with an extroverted personality. 
Moreover, the personality of the advisor affects how humans perceive the system, and it carries the risk of leading investors to place greater trust in those systems that provide worse advice.",
This underscores how crucial good preference elicitation is for providing useful financial advice.",
However, these drawbacks open interesting research directions not only from a system perspective, but also from a human-centered approach: automated advisory development where we do not just focus on improving the quality of automated systems to guide investors, but also on how the investors will adopt, trust and interact with these AI agents [6, 20].", + "bbox": [ + 81, + 106, + 482, + 287 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "References", + "text_level": 1, + "bbox": [ + 84, + 323, + 176, + 337 + ], + "page_idx": 9 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[1] James E. Allen, Curry I. Guinn, and Eric Horvitz. 1999. Mixed-initiative interaction. IEEE Intelligent Systems and their Applications 14, 5 (1999), 14-23.", + "[2] Ashay Argal, Siddharth Gupta, Ajay Modi, Pratik Pandey, Simon Shim, and Chang Choo. 2018. Intelligent travel chatbot for predictive recommendation in echo platform. In 2018 IEEE 8th Annual Computing and Communication Workshop and Conference (CCWC 2018). IEEE, 176-183.", + "[3] Andreas Bucher, Mateusz Dolata, Sven Eckhardt, Dario Staehelin, and Gerhard Schwabe. 2024. Talking to Multi-Party Conversational Agents in Advisory Services: Command-based vs. Conversational Interactions. Proceedings of the ACM on Human-Computer Interaction 8, GROUP (2024).", + "[4] Wanling Cai, Yucheng Jin, and Li Chen. 2022. Impacts of personal characteristics on user trust in conversational recommender systems. In Proceedings of the 2022 CHI Conference on Human Factors in Computing Systems (CHI 2022). Article 489, 14 pages.", + "[5] Gary Charness, Uri Gneezy, and Alex Imas. 2013. Experimental methods: Eliciting risk preferences. Journal of Economic Behavior & Organization 87 (2013), 43-51.", + "[6] Erin K. Chiou and John D. Lee. 2023. Trusting automation: Designing for responsivity and resilience. Human factors 65, 1 (2023), 137-165.", + "[7] Konstantina Christakopoulou, Filip Radlinski, and Katja Hofmann. 2016. 
Towards conversational recommender systems. In Proceedings of the 22nd ACM SIGKDD International Conference on Knowledge Discovery and Data Mining (KDD 2016). 815-824.",
Putting a value on your value: Quantifying Vanguard Adviser's Alpha in the UK. Technical Report. The Vanguard Group, Valley Forge, Pennsylvania, USA.", + "[16] Sherrie Y.X. Komiak and Izak Benbasat. 2006. The effects of personalization and familiarity on trust and adoption of recommendation agents. MIS quarterly (2006), 941-960.", + "[17] Ivica Kostric, Krisztian Balog, and Filip Radlinski. 2021. Soliciting user preferences in conversational recommender systems via usage-related questions. In Proceedings of the 15th ACM Conference on Recommender Systems. 724-729." + ], + "bbox": [ + 86, + 339, + 482, + 895 + ], + "page_idx": 9 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[18] Kausik Lakkaraju, Sara E. Jones, Sai Krishna Revanth Vuruma, Vishal Pallagani, Bharath C. Muppasani, and Biplav Srivastava. 2023. LLMs for Financial Advise-ment: A Fairness and Efficacy Study in Personal Decision Making. In Proceedings of the 4th ACM Conference on AI in Finance (ICAIF 2023). 100-107.", + "[19] Cong Li. 2016. When does web-based personalization really work? The distinction between actual personalization and perceived personalization. Computers in human behavior 54 (2016), 25-33.", + "[20] Zhuoyan Li, Zhuoran Lu, and Ming Yin. 2023. Modeling human trust and reliance in AI-assisted decision making: a markovian approach. In Proceedings of the 37th AAAI Conference on Artificial Intelligence (AAAI 2023/IAAI 2023/EAAI 2023). Article 679.", + "[21] Andrew W. Lo and Jillian Ross. 2024. Can ChatGPT Plan Your Retirement?: Generative AI and Financial Advice. Harvard Data Science Review (2024). Issue Special Issue 5.", + "[22] Tim Loughran and Bill McDonald. 2011. When is a liability not a liability? Textual analysis, dictionaries, and 10-Ks. The Journal of finance 66, 1 (2011), 35-65.", + "[23] Robert R. McCrae and Oliver P. John. 1992. An introduction to the five-factor model and its applications. 
Journal of personality 60 2 (1992), 175-215.", + "[24] Sourav Medya, Mohammad Rasoolinejad, Yang Yang, and Brian Uzzi. 2022. An Exploratory Study of Stock Price Movements from Earnings Calls. In Companion Proceedings of the Web Conference 2022 (WWW 2022). Association for Computing Machinery, 20-31.", + "[25] Pearl Pu, Li Chen, and Rong Hu. 2011. A user-centric evaluation framework for recommender systems. In Proceedings of the 5th ACM conference on Recommender Systems (RecSys 2011). 157-164.", + "[26] Filip Radlinski, Krisztian Balog, Bill Byrne, and Karthik Krishnamoorthi. 2019. Coached conversational preference elicitation: A case study in understanding movie preferences. In Proceedings of the 20th Annual SIGdial Meeting on Discourse and Dialogue (SIGDIAL 2019). 353-360.", + "[27] Filip Radlinski and Nick Craswell. 2017. A theoretical framework for conversational search. In Proceedings of the 2nd Conference on Human Information Interaction and Retrieval (CHIIR 2017). 117-126.", + "[28] Oscar Sainz, Jon Campos, Iker Garcia-Ferrero, Julien Etxaniz, Oier Lopez de Lacalle, and Eneko Agirre. 2023. NLP Evaluation in trouble: On the Need to Measure LLM Data Contamination for each Benchmark. In Findings of the Association for Computational Linguistics: EMNLP 2023, Houda Bouamor, Juan Pino, and Kalika Bali (Eds.). Association for Computational Linguistics, 10776-10787.", + "[29] Tetsuya Sakai. 2018. Laboratory experiments in information retrieval. The information retrieval series 40 (2018), 4.", + "[30] Javier Sanz-Cruzado, Edward Richards, and Richard McCreadie. 2024. FAR-AI: A Modular Platform for Investment Recommendation in the Financial Domain. In Proceedings of the 46th European Conference on Information Retrieval (ECIR 2024), Part V. Springer-Verlag, Glasgow, United Kingdom, 267-271.", + "[31] Yunfan Shao, Linyang Li, Junqi Dai, and Xipeng Qiu. 2023. Character-LLM: A Trainable Agent for Role-Playing. 
In Proceedings of the 2023 Conference on Empirical Methods in Natural Language Processing (EMNLP 2023). Association for Computational Linguistics, 13153-13187.", + "[32] Tuva Lunde Smestad and Frode Volden. 2019. Chatbot personalities matters: improving the user experience of chatbot interfaces. In 5th International Conference Internet Science: (INSCI 2018). Springer, 170-181.", + "[33] David J Streich. 2023. Risk preference elicitation and financial advice taking. Journal of Behavioral Finance 24, 3 (2023), 259-275.", + "[34] Yueming Sun and Yi Zhang. 2018. Conversational recommender system. In Proceedings of the 41th International ACM SIGIR Conference on Research and Development in Information Retrieval (SIGIR 2018), 235-244.", + "[35] Takehiro Takayanagi, Kiyoshi Izumi, Atsuo Kato, Naoyuki Tsunedomi, and Yukina Abe. 2023. Personalized Stock Recommendation with Investors' Attention and Contextual Information. In Proceedings of the 46th International ACM SIGIR Conference on Research and Development in Information Retrieval (SIGIR 2023). Association for Computing Machinery, 3339-3343.", + "[36] Johanne R. Trippas, Sara Fahad Dawood Al Lawati, Joel Mackenzie, and Luke Gallagher. 2024. What do Users Really Ask Large Language Models? An Initial Log Analysis of Google Bard Interactions in the Wild. In Proceedings of the 47th International ACM SIGIR Conference on Research and Development in Information Retrieval (SIGIR 2024). 2703-2707.", + "[37] Johanne R. Trippas, Luke Gallagher, and Joel Mackenzie. 2024. Re-evaluating the Command-and-Control Paradigm in Conversational Search Interactions. In Proceedings of the 33rd ACM International Conference on Information and Knowledge Management (CIKM 2024). Association for Computing Machinery, 2260-2270.", + "[38] Patchara Vanichvasin. 2021. Chatbot Development as a Digital Learning Tool to Increase Students' Research Knowledge. 
International Education Studies 14, 2 (2021), 44-53.", + "[39] Xuena Wang, Xueting Li, Zi Yin, Yue Wu, and Jia Liu. 2023. Emotional intelligence of large language models. Journal of Pacific Rim Psychology 17 (2023), 18344909231213958.", + "[40] Pontus Wärnestäl. 2005. User evaluation of a conversational recommender system. In Proceedings of the 4th Workshop on Knowledge and Reasoning in Practical" + ], + "bbox": [ + 516, + 108, + 913, + 895 + ], + "page_idx": 9 + }, + { + "type": "header", + "text": "SIGIR 2025, July 13-18, 2018, Padua, Italy", + "bbox": [ + 84, + 75, + 282, + 87 + ], + "page_idx": 9 + }, + { + "type": "header", + "text": "Takayanagi et al.", + "bbox": [ + 828, + 75, + 911, + 87 + ], + "page_idx": 9 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "Dialogue Systems.", + "[41] Hamed Zamani, Johanne R Trippas, Jeff Dalton, Filip Radlinski, et al. 2023. Conversational information seeking. Foundations and Trends in Information Retrieval 17, 3-4 (2023), 244-456.", + "[42] Markus Zanker, Laurens Rook, and Dietmar Jannach. 2019. Measuring the impact of online personalisation: Past, present and future. International Journal of Human-Computer Studies 131 (2019), 160–168.", + "[43] Yongfeng Zhang, Xu Chen, Qingyao Ai, Liu Yang, and W Bruce Croft. 2018. Towards conversational search and recommendation: System ask, user respond. In Proceedings of the 27th ACM International Conference on Information and Knowledge Management (CIKM 2018). 177-186." + ], + "bbox": [ + 84, + 108, + 483, + 220 + ], + "page_idx": 10 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[44] Huaqin Zhao, Zhengliang Liu, Zihao Wu, Yiwei Li, Tianze Yang, Peng Shu, Shaochen Xu, Haixing Dai, Lin Zhao, Gengchen Mai, et al. 2024. Revolutionizing Finance with LLMs: An Overview of Applications and Insights. arXiv preprint arXiv:2401.11641 (2024).", + "[45] Dávid Zibriczky. 2016. Recommender systems meet finance: a literature review. 
In Proceedings of the 2nd International Workshop on Personalization & Recommender Systems in Financial Services (FinRec 2016). 1-10.", + "[46] Liv Ziegfeld, Daan Di Scala, and Anita HM Cremers. 2025. The effect of preference elicitation methods on the user experience in conversational recommender systems. Computer Speech & Language 89 (2025), 101696." + ], + "bbox": [ + 516, + 108, + 913, + 210 + ], + "page_idx": 10 + }, + { + "type": "header", + "text": "Are Generative AI Agents Effective Personalized Financial Advisors?", + "bbox": [ + 83, + 75, + 405, + 85 + ], + "page_idx": 10 + }, + { + "type": "header", + "text": "SIGIR 2025, July 13-18, 2018, Padua, Italy", + "bbox": [ + 715, + 75, + 911, + 87 + ], + "page_idx": 10 + } +] \ No newline at end of file diff --git a/data/2025/2504_05xxx/2504.05862/f5f20bac-767d-4260-82e6-943416d1d631_model.json b/data/2025/2504_05xxx/2504.05862/f5f20bac-767d-4260-82e6-943416d1d631_model.json new file mode 100644 index 0000000000000000000000000000000000000000..533da971b0ddd962271f8ce9fa52c8e7016dc1ab --- /dev/null +++ b/data/2025/2504_05xxx/2504.05862/f5f20bac-767d-4260-82e6-943416d1d631_model.json @@ -0,0 +1,2906 @@ +[ + [ + { + "type": "aside_text", + "bbox": [ + 0.023, + 0.271, + 0.061, + 0.707 + ], + "angle": 270, + "content": "arXiv:2504.05862v2 [cs.AI] 15 Apr 2025" + }, + { + "type": "title", + "bbox": [ + 0.126, + 0.102, + 0.87, + 0.149 + ], + "angle": 0, + "content": "Are Generative AI Agents Effective Personalized Financial Advisors?" 
+ }, + { + "type": "text", + "bbox": [ + 0.145, + 0.158, + 0.317, + 0.176 + ], + "angle": 0, + "content": "Takehiro Takayanagi" + }, + { + "type": "text", + "bbox": [ + 0.12, + 0.176, + 0.345, + 0.19 + ], + "angle": 0, + "content": "takayanagi-takehiro590@g.ecc.u-" + }, + { + "type": "text", + "bbox": [ + 0.194, + 0.191, + 0.27, + 0.205 + ], + "angle": 0, + "content": "tokyo.ac.jp" + }, + { + "type": "text", + "bbox": [ + 0.149, + 0.206, + 0.314, + 0.22 + ], + "angle": 0, + "content": "The University of Tokyo" + }, + { + "type": "text", + "bbox": [ + 0.186, + 0.221, + 0.276, + 0.235 + ], + "angle": 0, + "content": "Tokyo, Japan" + }, + { + "type": "text", + "bbox": [ + 0.442, + 0.158, + 0.558, + 0.175 + ], + "angle": 0, + "content": "Kiyoshi Izumi" + }, + { + "type": "text", + "bbox": [ + 0.413, + 0.176, + 0.588, + 0.19 + ], + "angle": 0, + "content": "izumi@sys.t.u-tokyo.ac.jp" + }, + { + "type": "text", + "bbox": [ + 0.417, + 0.191, + 0.582, + 0.205 + ], + "angle": 0, + "content": "The University of Tokyo" + }, + { + "type": "text", + "bbox": [ + 0.455, + 0.206, + 0.545, + 0.22 + ], + "angle": 0, + "content": "Tokyo, Japan" + }, + { + "type": "text", + "bbox": [ + 0.683, + 0.158, + 0.851, + 0.174 + ], + "angle": 0, + "content": "Javier Sanz-Cruzado" + }, + { + "type": "text", + "bbox": [ + 0.729, + 0.176, + 0.807, + 0.19 + ], + "angle": 0, + "content": "javier.sanz-" + }, + { + "type": "text", + "bbox": [ + 0.673, + 0.191, + 0.865, + 0.205 + ], + "angle": 0, + "content": "cruzadopuig@glasgow.ac.uk" + }, + { + "type": "text", + "bbox": [ + 0.693, + 0.206, + 0.844, + 0.22 + ], + "angle": 0, + "content": "University of Glasgow" + }, + { + "type": "text", + "bbox": [ + 0.679, + 0.221, + 0.857, + 0.235 + ], + "angle": 0, + "content": "Glasgow, United Kingdom" + }, + { + "type": "text", + "bbox": [ + 0.286, + 0.247, + 0.443, + 0.262 + ], + "angle": 0, + "content": "Richard McCreadie" + }, + { + "type": "text", + "bbox": [ + 0.252, + 0.264, + 0.48, + 0.279 + ], + 
"angle": 0, + "content": "richard.mccreadie@glasgow.ac.uk" + }, + { + "type": "text", + "bbox": [ + 0.289, + 0.28, + 0.441, + 0.294 + ], + "angle": 0, + "content": "University of Glasgow" + }, + { + "type": "text", + "bbox": [ + 0.276, + 0.295, + 0.454, + 0.309 + ], + "angle": 0, + "content": "Glasgow, United Kingdom" + }, + { + "type": "title", + "bbox": [ + 0.084, + 0.318, + 0.158, + 0.332 + ], + "angle": 0, + "content": "Abstract" + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.337, + 0.485, + 0.67 + ], + "angle": 0, + "content": "Large language model-based agents are becoming increasingly popular as a low-cost mechanism to provide personalized, conversational advice, and have demonstrated impressive capabilities in relatively simple scenarios, such as movie recommendations. But how do these agents perform in complex high-stakes domains, where domain expertise is essential and mistakes carry substantial risk? This paper investigates the effectiveness of LLM-advisors in the finance domain, focusing on three distinct challenges: (1) eliciting user preferences when users themselves may be unsure of their needs, (2) providing personalized guidance for diverse investment preferences, and (3) leveraging advisor personality to build relationships and foster trust. Via a lab-based user study with 64 participants, we show that LLM-advisors often match human advisor performance when eliciting preferences, although they can struggle to resolve conflicting user needs. When providing personalized advice, the LLM was able to positively influence user behavior, but demonstrated clear failure modes. Our results show that accurate preference elicitation is key, otherwise, the LLM-advisor has little impact, or can even direct the investor toward unsuitable assets. More worryingly, users appear insensitive to the quality of advice being given, or worse these can have an inverse relationship. 
Indeed, users reported a preference for and increased satisfaction as well as emotional trust with LLMs adopting an extroverted persona, even though those agents provided worse advice." + }, + { + "type": "title", + "bbox": [ + 0.084, + 0.681, + 0.202, + 0.697 + ], + "angle": 0, + "content": "CCS Concepts" + }, + { + "type": "text", + "bbox": [ + 0.083, + 0.7, + 0.484, + 0.727 + ], + "angle": 0, + "content": "- Information systems \\(\\rightarrow\\) Decision support systems; Personalization." + }, + { + "type": "title", + "bbox": [ + 0.084, + 0.739, + 0.169, + 0.754 + ], + "angle": 0, + "content": "Keywords" + }, + { + "type": "text", + "bbox": [ + 0.083, + 0.758, + 0.482, + 0.772 + ], + "angle": 0, + "content": "large language models, financial advisor, user study, generative AI" + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.78, + 0.483, + 0.853 + ], + "angle": 0, + "content": "Permission to make digital or hard copies of all or part of this work for personal or classroom use is granted without fee provided that copies are not made or distributed for profit or commercial advantage and that copies bear this notice and the full citation on the first page. Copyrights for components of this work owned by others than the author(s) must be honored. Abstracting with credit is permitted. To copy otherwise, or republish, to post on servers or to redistribute to lists, requires prior specific permission and/or a fee. Request permissions from permissions@acm.org." + }, + { + "type": "text", + "bbox": [ + 0.085, + 0.853, + 0.278, + 0.864 + ], + "angle": 0, + "content": "SIGIR 2025, July 13-18, 2018, Padua, Italy" + }, + { + "type": "text", + "bbox": [ + 0.085, + 0.865, + 0.473, + 0.875 + ], + "angle": 0, + "content": "© 2025 Copyright held by the owner/author(s). Publication rights licensed to ACM." 
+ }, + { + "type": "text", + "bbox": [ + 0.086, + 0.875, + 0.27, + 0.884 + ], + "angle": 0, + "content": "ACM ISBN 978-x-xxxxx-xxxxx-x/YY/MM" + }, + { + "type": "text", + "bbox": [ + 0.086, + 0.885, + 0.285, + 0.896 + ], + "angle": 0, + "content": "https://doi.org/10.1145/nnnnnnn.nnnnnnn" + }, + { + "type": "text", + "bbox": [ + 0.586, + 0.247, + 0.68, + 0.262 + ], + "angle": 0, + "content": "Iadh Ounis" + }, + { + "type": "text", + "bbox": [ + 0.544, + 0.264, + 0.723, + 0.278 + ], + "angle": 0, + "content": "iadh.ounis@glasgow.ac.uk" + }, + { + "type": "text", + "bbox": [ + 0.558, + 0.279, + 0.709, + 0.293 + ], + "angle": 0, + "content": "University of Glasgow" + }, + { + "type": "text", + "bbox": [ + 0.545, + 0.295, + 0.722, + 0.309 + ], + "angle": 0, + "content": "Glasgow, United Kingdom" + }, + { + "type": "image", + "bbox": [ + 0.539, + 0.316, + 0.889, + 0.444 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.514, + 0.459, + 0.914, + 0.488 + ], + "angle": 0, + "content": "Figure 1: Conceptual illustration of an LLM-advisor with two stages: (1) Preference Elicitation and (2) Advisory Discussion." + }, + { + "type": "title", + "bbox": [ + 0.515, + 0.502, + 0.662, + 0.514 + ], + "angle": 0, + "content": "ACM Reference Format:" + }, + { + "type": "text", + "bbox": [ + 0.514, + 0.515, + 0.916, + 0.566 + ], + "angle": 0, + "content": "Takehiro Takayanagi, Kiyoshi Izumi, Javier Sanz-Cruzado, Richard McCreadie, and Iadh Ounis. 2025. Are Generative AI Agents Effective Personalized Financial Advisors?. In Proceedings of SIGIR 2025. ACM, New York, NY, USA, 11 pages. 
https://doi.org/10.1145/nnnnnnn.nnnnnnn" + }, + { + "type": "title", + "bbox": [ + 0.516, + 0.587, + 0.651, + 0.601 + ], + "angle": 0, + "content": "1 Introduction" + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.605, + 0.916, + 0.757 + ], + "angle": 0, + "content": "Personalized advice plays a crucial role in our society, particularly in complex and high-stakes domains like healthcare and finance. Advisors and professionals in these fields use their expertise to offer personalized guidance and emotional support to their clients, leveraging people's specific preferences and/or circumstances. However, advisory services are often provided at a high cost, effectively excluding a large portion of the population from this critical advice. In the financial domain, to mitigate this issue, automated decision support systems have been widely studied, with a special focus on investment-related predictions, such as financial asset recommendations [30, 35]." + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.758, + 0.916, + 0.897 + ], + "angle": 0, + "content": "Recent advances in natural language processing and large language models (LLMs) have significantly accelerated the development of conversational agents, presenting the potential to function as personalized assistants for information-seeking and decision-making [41]. These agents can now leverage multi-turn dialogues, enabling dynamic, mixed-initiative interactions where both users and systems can take the lead in conversations [1]. This progression has expanded the application of conversational agents to various tasks, such as recommendation, question answering, and search [12, 27, 34, 41]." + } + ], + [ + { + "type": "header", + "bbox": [ + 0.085, + 0.076, + 0.282, + 0.088 + ], + "angle": 0, + "content": "SIGIR 2025, July 13-18, 2018, Padua, Italy" + }, + { + "type": "header", + "bbox": [ + 0.83, + 0.076, + 0.912, + 0.087 + ], + "angle": 0, + "content": "Takayanagi et al." 
+ }, + { + "type": "text", + "bbox": [ + 0.082, + 0.107, + 0.482, + 0.287 + ], + "angle": 0, + "content": "The application of these conversational agents for financial decision-making represents a much more complex scenario than others like movie recommendations, because users are not necessarily familiar with the basic terminology and concepts in this space, and mistakes carry a substantial risk that can lead to large monetary losses. While there is a growing interest in building these conversational assistants to provide automated financial advice [21], previous work has mostly targeted agents capable of handling simple inquiries [18, 36, 37]. Compared to these simple systems, helping users navigate financial decisions and market uncertainties poses a much greater challenge. Therefore, it is not yet clear how to develop systems that effectively support complex financial information-seeking and decision-making tasks." + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.287, + 0.482, + 0.384 + ], + "angle": 0, + "content": "This work aims to close this gap by exploring the effectiveness of LLMs to act as personalized financial advisory agents. In particular, we focus on three problems: (a) eliciting investor preferences through interactive conversations, (b) providing personalized guidance to help users determine whether particular financial assets align with their preferences, and (c) leveraging the personality of the advisor to foster trust on the advisor." + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.384, + 0.482, + 0.48 + ], + "angle": 0, + "content": "First, the financial literature emphasizes that eliciting user preferences is central to delivering suitable advice [33]. However, it remains unclear whether current conversational technologies, particularly those powered by LLMs, can correctly elicit user preferences in specialized domains where users struggle to articulate their needs. 
Our work addresses this challenge in the context of financial services." + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.481, + 0.482, + 0.577 + ], + "angle": 0, + "content": "Second, although personalization is widely regarded as important in the financial decision-support literature [30, 35], its value in a conversational setting remains uncertain. In particular, we explore whether tailoring dialogue around a user's profile and context improves financial decision-making. Additionally, we also explore how personalization influences user perceptions of the advisor, in terms of aspects like trust and satisfaction." + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.578, + 0.482, + 0.675 + ], + "angle": 0, + "content": "Finally, in personalized advisory settings within high-stakes domains, the relationship and trust between the client and advisor play a crucial role [21]. Research on conversational agents suggests that agent personality significantly affects users' perceptions of the system [4, 32]. However, it remains unclear how an advisor's personality in the financial domain influences both the quality of users' financial decisions and their overall experience." + }, + { + "type": "text", + "bbox": [ + 0.1, + 0.675, + 0.482, + 0.689 + ], + "angle": 0, + "content": "To summarize, in this paper, we explore the following questions:" + }, + { + "type": "text", + "bbox": [ + 0.11, + 0.703, + 0.48, + 0.728 + ], + "angle": 0, + "content": "- RQ1: Can LLM-advisors effectively elicit user preferences through conversation?" + }, + { + "type": "text", + "bbox": [ + 0.11, + 0.731, + 0.48, + 0.755 + ], + "angle": 0, + "content": "- RQ2: Does personalization lead to better investment decisions and a more positive advisor assessment?" + }, + { + "type": "text", + "bbox": [ + 0.11, + 0.758, + 0.48, + 0.782 + ], + "angle": 0, + "content": "- RQ3: Do different personality traits affect decision quality and advisor assessment?" 
+ }, + { + "type": "list", + "bbox": [ + 0.11, + 0.703, + 0.48, + 0.782 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.8, + 0.482, + 0.897 + ], + "angle": 0, + "content": "To address these questions, we conduct a lab-based user study that explores the effectiveness of LLMs as interactive conversational financial advisors, on which we simulate realistic investment scenarios using investor narratives and stock relevance scores curated by financial experts. Figure 1 illustrates an example conversation with the advisor, divided into two stages: first, the LLM-advisor attempts to capture the investor preferences through conversation; in" + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.107, + 0.915, + 0.19 + ], + "angle": 0, + "content": "the second stage, given an individual asset, the advisor provides information about it to the investor, including how the asset matches (or not) the investor's preferences. To answer the different questions, we compare different configurations of the LLM-advisor: first, we compare personalized vs. non-personalized advisors, and, then, we compare two personalized advisors with distinct personalities." + }, + { + "type": "title", + "bbox": [ + 0.514, + 0.203, + 0.659, + 0.218 + ], + "angle": 0, + "content": "2 Related Work" + }, + { + "type": "title", + "bbox": [ + 0.514, + 0.223, + 0.902, + 0.238 + ], + "angle": 0, + "content": "2.1 Personalization and Preference Elicitation" + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.242, + 0.915, + 0.449 + ], + "angle": 0, + "content": "Information systems, especially those focused on search and recommendation benefit from personalization [16]. Specifically, personalization techniques play a crucial role in enhancing user experience [19, 25, 42]. Interactive approaches, such as conversational preference elicitation represent the frontier of personalization. 
This problem has received growing attention, as advances in generative AI now provide a functional mechanism to collect user preferences dynamically in a free-form manner [41]. This interactive approach can capture more diverse and targeted insights than static approaches like questionnaires [7, 12, 26, 27, 34]. Indeed, recent studies have proposed various methods for effective conversational preference elicitation [34, 43], as well as user studies on the perceived quality of this process in domains such as e-commerce, movies, fashion, books, travel, and restaurant recommendations [2, 8, 17, 26, 34, 46]." + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.45, + 0.915, + 0.685 + ], + "angle": 0, + "content": "However, we argue that for some important domains, trying to directly collect preferences is insufficient. An implicit assumption of these studies is that if directly asked, the user will be able to accurately express their preferences. It is reasonable to expect that this assumption would hold for scenarios like movie recommendation; we can ask a user \"do you like horror movies?\" and expect a useful response. On the other hand, this will not hold for complex tasks, where the user lacks the knowledge to form an accurate response [12, 40]. For instance, in an investment context if we asked \"do you prefer ETFs or Bonds?\", it is not clear that an inexperienced user would be able to produce a meaningful answer. In these cases, an ideal agent needs to fill the gaps in the user knowledge through conversation, as well as infer the user preferences across multiple (often uncertain) user responses. But how effective are generative AI agents at this complex task? This paper aims to answer that question for the domain of financial advisory; a particularly challenging domain given its technical nature and high risks if done poorly." 
+ }, + { + "type": "title", + "bbox": [ + 0.514, + 0.698, + 0.71, + 0.714 + ], + "angle": 0, + "content": "2.2 Financial advisory" + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.716, + 0.915, + 0.897 + ], + "angle": 0, + "content": "In the financial domain, advisors help individuals manage their personal finances by offering guidance on investments and assisting with decision-making. While financial advisors can be beneficial, their services often come at a high cost, making them unaffordable for many people. To mitigate this issue, automated (nonconversational) financial decision support systems such as financial recommender systems have been widely studied [45]. The majority of research in this area has been focused on how to find profitable assets (i.e. those that will make money if we invest in them). These works assume a simplified user-model, where an investor is only concerned with maximizing return-on-investment over a fixed period of time [30, 35]. These studies frame financial advisory as a ranking problem, where the goal is to rank financial assets for a user" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.084, + 0.076, + 0.406, + 0.087 + ], + "angle": 0, + "content": "Are Generative AI Agents Effective Personalized Financial Advisors?" + }, + { + "type": "header", + "bbox": [ + 0.716, + 0.076, + 0.914, + 0.088 + ], + "angle": 0, + "content": "SIGIR 2025, July 13-18, 2018, Padua, Italy" + }, + { + "type": "image", + "bbox": [ + 0.152, + 0.104, + 0.849, + 0.226 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.082, + 0.245, + 0.915, + 0.274 + ], + "angle": 0, + "content": "Figure 2: Example of an investor profile, investment preferences, and ground truth ranking. Dashed line components are used for evaluation (and therefore, they are not shown to the user/LLM)." + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.28, + 0.482, + 0.349 + ], + "angle": 0, + "content": "over a specified time period. 
However, a recent study suggests that a large part of the value offered by human financial advisors stems from their ability to personalize investment guidance to clients' specific needs, build relationships, and foster trust [15], rather than simply presenting suitable assets." + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.35, + 0.483, + 0.543 + ], + "angle": 0, + "content": "Reflecting on these findings, the development of conversational financial advisors has drawn increasing attention, as it enables a dynamic understanding of users' needs, personalized guidance, and the potential to build trustworthy relationships [3, 9, 11, 18, 44]. In particular, the conversational agents' personality has gained attention as a factor that can help build relationships with clients and foster trust [21], especially given the successes of conversational agents using the Big Five personality model [23] to enhance the end-user experience [5, 33]. Although conversational agents show potential in finance, how to configure them to match the value of human advisors remains unclear. Therefore, we conduct a user study to examine how personalizing investment guidance and the advisor's personality shape users' financial decision-making effectiveness and overall user experience." + }, + { + "type": "title", + "bbox": [ + 0.084, + 0.56, + 0.222, + 0.575 + ], + "angle": 0, + "content": "3 Methodology" + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.578, + 0.483, + 0.689 + ], + "angle": 0, + "content": "In this paper we aim to determine to what extent current generative language models can act as an effective financial advisor. Indeed, given the need to personalize for the user, emotional implications, the technical nature of the information-seeking task, and high impact if failed, we argue that this is an excellent test case for the limits of generative large language models. 
To structure our evaluation, we divide our study into two phases, as illustrated in Figure 1, where we evaluate the success of both:" + }, + { + "type": "text", + "bbox": [ + 0.102, + 0.696, + 0.483, + 0.776 + ], + "angle": 0, + "content": "(1) Preference Elicitation: During this stage, we have the LLM-advisor hold a natural language conversation with a human, where it is directed to collect information regarding the person's investment preferences. The human in this interaction is pretending to have preferences from a given investor profile." + }, + { + "type": "text", + "bbox": [ + 0.102, + 0.779, + 0.482, + 0.862 + ], + "angle": 0, + "content": "(2) Advisory Discussion: During the advisory discussion, the LLM-advisor again has a natural language conversation with the human (acting on an investor profile), where the human collects information about whether a company is a suitable investment for them. This is repeated for multiple companies per investor profile." + }, + { + "type": "list", + "bbox": [ + 0.102, + 0.696, + 0.483, + 0.862 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.869, + 0.482, + 0.896 + ], + "angle": 0, + "content": "We provide preparatory information and discuss each stage in more detail below:" + }, + { + "type": "title", + "bbox": [ + 0.515, + 0.279, + 0.694, + 0.292 + ], + "angle": 0, + "content": "3.1 Investor Profiles" + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.297, + 0.915, + 0.435 + ], + "angle": 0, + "content": "To fairly evaluate the ability of any LLM-advisor, we need to have them interact with human users with real needs. Given the open-ended nature of free-form conversations, it is desirable to repeat each experiment with different people such that we can observe variances in conversation paths, as those variances may influence task success. However, to enable repeatability, we need to hold the investor needs constant across repetitions. 
Hence, we define three archetypal investor profiles \\( i \\in I \\) based on input from a financial expert, where our human participants are given one to follow when conversing with the LLM-advisor:" + }, + { + "type": "text", + "bbox": [ + 0.541, + 0.441, + 0.915, + 0.481 + ], + "angle": 0, + "content": "- Investor 1: Growth-Oriented Healthcare Enthusiast: Prefers healthcare innovations, values high-growth opportunities, and takes measured risks." + }, + { + "type": "text", + "bbox": [ + 0.541, + 0.483, + 0.913, + 0.524 + ], + "angle": 0, + "content": "- Investor 2: Conservative Income Seeker: Seeks stable returns, invests in well-established companies, values regular dividend payouts." + }, + { + "type": "text", + "bbox": [ + 0.541, + 0.525, + 0.915, + 0.566 + ], + "angle": 0, + "content": "- Investor 3: Risk-taking Value Investor: Targets undervalued companies with strong long-term potential, tolerates short-term volatility, and invests in cyclical sectors." + }, + { + "type": "list", + "bbox": [ + 0.541, + 0.441, + 0.915, + 0.566 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.571, + 0.915, + 0.752 + ], + "angle": 0, + "content": "For each of these investor profiles, we select three key investment preferences, chosen from well-known investment characteristics such as industry sector, stock style, consistency in dividend payments, and sensitivity to global market changes [10]. We denote the set of investor preferences as \\( i^{pref} \\). In our experiments, we simulate a realistic elicitation scenario where the advisor collects the preferences from the participants. Therefore, we do not straightforwardly provide the preferences to the participants. Instead, we present them as text narratives of between 150 to 200 words. A financial expert was consulted to confirm the quality and reliability of these narratives. 
An example narrative representing Investor 2 is illustrated in Figure 2, where we highlight the sentences referring to specific investor preferences." + }, + { + "type": "title", + "bbox": [ + 0.515, + 0.766, + 0.803, + 0.781 + ], + "angle": 0, + "content": "3.2 Stage 1: Preference Elicitation" + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.785, + 0.915, + 0.897 + ], + "angle": 0, + "content": "The goal of stage 1 of our study is to determine to what extent an LLM-advisor can effectively collect a user's investment preferences through conversation. Formally, given a participant of the user study \\( u \\) and an investor profile \\( i \\), during the elicitation stage, the LLM-advisor aims to obtain an approximated set of preferences, denoted \\( i_u^{LLM} \\), that matches the investor preferences (\\( i^{pref} \\)). To achieve this, the generative model produces a series of questions that participants answer by interpreting the investor narrative." + } + ], + [ + { + "type": "header", + "bbox": [ + 0.085, + 0.076, + 0.283, + 0.087 + ], + "angle": 0, + "content": "SIGIR 2025, July 13-18, 2018, Padua, Italy" + }, + { + "type": "header", + "bbox": [ + 0.83, + 0.076, + 0.913, + 0.088 + ], + "angle": 0, + "content": "Takayanagi et al." + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.107, + 0.482, + 0.152 + ], + "angle": 0, + "content": "Responses to those questions, denoted as \\( R_{i}^{u} \\), are used by the LLM-advisor to generate the user profile \\( i_{u}^{LLM} \\). Success is then measured by manually evaluating the overlap between \\( i^{pref} \\) and \\( i_{u}^{LLM} \\)." + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.152, + 0.483, + 0.277 + ], + "angle": 0, + "content": "For user elicitation, we adopted a System-Ask-User-Respond (SAUR) paradigm [43]. During the conversation, the advisor proactively inquires about the user's preferences given a set of target preferences (e.g., industry type, acceptable risk). 
After the human participant responds to a question, the LLM-advisor checks whether the collected preferences cover all of the target preferences. If the advisor is confident that they do, it ends the conversation and prompts the user to proceed to the next stage; otherwise, it continues asking follow-up questions in a loop." + }, + { + "type": "title", + "bbox": [ + 0.084, + 0.288, + 0.36, + 0.304 + ], + "angle": 0, + "content": "3.3 Stage 2: Advisory Discussion" + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.306, + 0.483, + 0.459 + ], + "angle": 0, + "content": "Stage 2 of our study investigates to what extent an LLM-advisor can provide the same benefits as a real human advisor when exploring investment options. Note that the goal here is not to have the LLM-advisor promote any one asset, but rather to provide accurate and meaningful information such that the human can find the best investment opportunity for them. To this end, we structure our experiment such that the human (acting on an investor profile) has one conversation with the LLM-advisor for each of a set of assets being considered.1 After all assets are presented to the participant, a stock ranking is generated by sorting the stocks by the participant rating in descending order." + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.459, + 0.483, + 0.627 + ], + "angle": 0, + "content": "Importantly, as we know the investor profile \\( i^{pref} \\) for each conversation about an asset \\( a \\), we can objectively determine whether \\( a \\) is a good investment given \\( i^{pref} \\), forming a ground truth against which we can compare to the rating provided by our human participant after their conversation with the LLM-advisor. For each asset \\( a \\), a financial expert produced a score between 0 and 3 by manually checking whether \\( a \\) satisfied each of the three investment criteria contained in \\( i^{pref} \\). A ground-truth ranking was produced by sorting the assets by the expert scores. 
We show an example of the ranking construction in Figure 2. During evaluation, the closer the participant ranking is to the ranking produced by expert judgments, the better the LLM-advisor performed." + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.634, + 0.483, + 0.718 + ], + "angle": 0, + "content": "Baseline Prompt: As we are working with an LLM-advisor and the nature of financial information-seeking is time-sensitive, we need to provide any information that might change over time to the LLM within the prompt. As such, for each asset \\( a \\), we pre-prepared a standard asset descriptor block after consulting with a financial expert, containing:" + }, + { + "type": "text", + "bbox": [ + 0.11, + 0.72, + 0.48, + 0.746 + ], + "angle": 0, + "content": "Stock Prices: We collect monthly stock prices from 2023 using Yahoo! Finance.2" + }, + { + "type": "text", + "bbox": [ + 0.11, + 0.748, + 0.481, + 0.774 + ], + "angle": 0, + "content": "- Business Summary: We gather each company's business overview from Yahoo! Finance." + }, + { + "type": "text", + "bbox": [ + 0.11, + 0.776, + 0.482, + 0.817 + ], + "angle": 0, + "content": "- Recent Performance and Key Financial Indicators (e.g., EPS): We obtain earnings conference call transcripts3 from Seeking Alpha for the last quarter of 2023." + }, + { + "type": "list", + "bbox": [ + 0.11, + 0.72, + 0.482, + 0.817 + ], + "angle": 0, + "content": null + }, + { + "type": "page_footnote", + "bbox": [ + 0.082, + 0.825, + 0.483, + 0.847 + ], + "angle": 0, + "content": "These were manually selected, however in a production environment these might be produced by an asset recommendation system." + }, + { + "type": "page_footnote", + "bbox": [ + 0.082, + 0.847, + 0.483, + 0.878 + ], + "angle": 0, + "content": "2The scenario for the financial advising of our user study is set to December 30, 2023. By basing our experiment at the end of 2023, we avoid the problem of data contamination [28]." 
+ }, + { + "type": "page_footnote", + "bbox": [ + 0.082, + 0.877, + 0.482, + 0.91 + ], + "angle": 0, + "content": "3Earnings conference calls, hosted by publicly traded companies, discuss key aspects of their earnings reports and future goals with financial analysts and investors, thus covering critical financial indicators and recent performance insights [24]. These" + }, + { + "type": "list", + "bbox": [ + 0.082, + 0.825, + 0.483, + 0.91 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.529, + 0.105, + 0.898, + 0.324 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.612, + 0.337, + 0.816, + 0.351 + ], + "angle": 0, + "content": "Figure 3: User study structure." + }, + { + "type": "text", + "bbox": [ + 0.514, + 0.357, + 0.915, + 0.412 + ], + "angle": 0, + "content": "The advisor using this prompt acts as our baseline for the advisory discussion study. We augment this baseline with additional context and instructions to form two additional experimental scenarios, discussed below:" + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.42, + 0.915, + 0.504 + ], + "angle": 0, + "content": "+Personalization: As discussed earlier, one of the core roles of the financial advisor is to personalize to the individual customer, based on their financial situation, needs, and preferences. To enable the LLM-advisor to personalize for the user, we integrate the generated profile from the preference elicitation (Stage 1) \\( i_u^{LLM} \\) into the prompt. We represent each preference as a series of short sentences." + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.51, + 0.915, + 0.635 + ], + "angle": 0, + "content": "+Personality: In Section 2.2 we discussed how human financial advisors provide emotional support as well as financial advice. 
While it is unlikely that an LLM-advisor could do this as well as a human (it lacks both emotional intelligence and non-conversational clues to the customer's mental state [39]) it might be possible to provide a better end-user experience by directing the LLM-advisor to adopt a personality. As noted in Section 2 it is possible to do this via prompt engineering, such as instructing the LLM to take on the traits of one or more of the Big-Five personality types [23]." + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.635, + 0.913, + 0.69 + ], + "angle": 0, + "content": "As we are performing a user study with humans, it would be impractical to exhaustively test every combination of personality types, hence as an initial investigation we experiment with two distinct personality profiles [32]:" + }, + { + "type": "text", + "bbox": [ + 0.541, + 0.694, + 0.914, + 0.72 + ], + "angle": 0, + "content": "- Extroverted: High in extroversion, agreeableness, and openness; low in conscientiousness and neuroticism." + }, + { + "type": "text", + "bbox": [ + 0.541, + 0.721, + 0.914, + 0.748 + ], + "angle": 0, + "content": "- Conscientious: Low in extroversion, agreeableness, and openness; high in conscientiousness and neuroticism." + }, + { + "type": "list", + "bbox": [ + 0.541, + 0.694, + 0.914, + 0.748 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.751, + 0.915, + 0.863 + ], + "angle": 0, + "content": "We adopted the prompting method from Jiang et al. (2024) to assign a Big Five personality trait to the LLM agent [14], choosing it for its simplicity and effectiveness among various proposed approaches for embedding personality in LLMs (including both prompting and fine-tuning) [13, 14, 31]. To ensure a high standard of professionalism and accurate representation of the intended personality, we consulted financial professionals to review the texts generated by LLMs adopting both personas." 
+ }, + { + "type": "page_footnote", + "bbox": [ + 0.514, + 0.873, + 0.915, + 0.897 + ], + "angle": 0, + "content": "transcripts cover significant financial indicators and provide explanations of recent performance." + } + ], + [ + { + "type": "header", + "bbox": [ + 0.084, + 0.076, + 0.406, + 0.088 + ], + "angle": 0, + "content": "Are Generative AI Agents Effective Personalized Financial Advisors?" + }, + { + "type": "header", + "bbox": [ + 0.716, + 0.076, + 0.913, + 0.088 + ], + "angle": 0, + "content": "SIGIR 2025, July 13-18, 2018, Padua, Italy" + }, + { + "type": "title", + "bbox": [ + 0.084, + 0.106, + 0.3, + 0.122 + ], + "angle": 0, + "content": "3.4 Experimental Design" + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.125, + 0.482, + 0.221 + ], + "angle": 0, + "content": "In our experiment, we conducted two studies: a personalization study (for RQ2) and an advisor persona study (for RQ3). In the personalization study, participants compared a non-personalized (Baseline) advisor with a personalized (+Personalized) version. In the advisor persona study, they compared different LLM-advisor personality types (+Extroverted vs. +Conscientious). Participants are randomly assigned to one of these two studies." + }, + { + "type": "text", + "bbox": [ + 0.084, + 0.222, + 0.483, + 0.25 + ], + "angle": 0, + "content": "Figure 3 shows the structure of our user study for a single participant, comprising seven steps:" + }, + { + "type": "text", + "bbox": [ + 0.101, + 0.265, + 0.504, + 0.32 + ], + "angle": 0, + "content": "(1) Participant Training: Participants are given a general overview of the user study and given instructions on their expected roles during preference elicitation, advisory discussions, asset ranking, and advisor assessment." 
+ }, + { + "type": "text", + "bbox": [ + 0.101, + 0.32, + 0.483, + 0.362 + ], + "angle": 0, + "content": "(2) Investor Profile Allocation: The user \\( u \\) is randomly allocated one of the investor profiles (See Section 3.1) that they will follow. Each profile is assigned to 42 participants." + }, + { + "type": "text", + "bbox": [ + 0.102, + 0.362, + 0.483, + 0.444 + ], + "angle": 0, + "content": "(3) Preference Elicitation (Stage 1): The participant interacts with the LLM-advisor as if they were a new investor. The conversation ends once the LLM-advisor determines that they know enough about the investor to personalize for them. The median time spent on preference elicitation was 5 minutes and 11 seconds." + }, + { + "type": "text", + "bbox": [ + 0.102, + 0.446, + 0.483, + 0.532 + ], + "angle": 0, + "content": "(4) Response Summarization: Given the aggregator of user responses \\( R_{i}^{u} \\), we instruct an LLM to generate an investor profile \\( i_{u}^{LLM} \\). For each investor preference in \\( i^{pref} \\), if there is any relevant information in the responses \\( R_{i}^{u} \\), that information is included in \\( i_{u}^{LLM} \\). Otherwise, \\( i_{u}^{LLM} \\) indicates that no relevant information is available for that specific preference." + }, + { + "type": "text", + "bbox": [ + 0.102, + 0.532, + 0.483, + 0.684 + ], + "angle": 0, + "content": "(5) Advisory Discussion (Stage 2): To simplify the conversation flow we have the participant hold separate conversations with the LLM-advisor for each asset they might invest in. The LLM-advisor is provided with context about the current asset (see Section 3.3), and depending on the experimental scenario, optionally personalization information (step 4 output) and/or a target personality context statement. Each conversation continues until the user is satisfied that they have enough information to rate the asset. The order in which the assets are discussed is randomly assigned to avoid position bias." 
+ }, + { + "type": "text", + "bbox": [ + 0.102, + 0.684, + 0.483, + 0.754 + ], + "angle": 0, + "content": "(6) Asset Ranking and Feedback: Participants rank all the stocks (four in total) discussed in the advisory session according to their desire to invest in each. They also assess the advisor they interacted with using a 7-point Likert scale for the items listed in Table 1 (see Section 4)." + }, + { + "type": "list", + "bbox": [ + 0.101, + 0.265, + 0.504, + 0.754 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.77, + 0.483, + 0.84 + ], + "angle": 0, + "content": "To enable more effective pair-wise comparison of LLM-advisor variants, we have each participant test two variants per study. If the user has only tested one variant at this point, then they repeat the user study (starting at step 2) with the second variant. The order in which participants experience each variant is randomly assigned." + }, + { + "type": "page_footnote", + "bbox": [ + 0.102, + 0.855, + 0.483, + 0.897 + ], + "angle": 0, + "content": "(7) Exit Questionnaire: Once a pair of LLM-advisor variants have been tested, the user fills in an exit questionnaire that is designed to ask the overall experience in the user study." + }, + { + "type": "table_caption", + "bbox": [ + 0.515, + 0.105, + 0.916, + 0.133 + ], + "angle": 0, + "content": "Table 1: Operational definitions used in the advisor assessment questionnaire for all response dimensions." + }, + { + "type": "table", + "bbox": [ + 0.518, + 0.146, + 0.915, + 0.263 + ], + "angle": 0, + "content": "
Response DimensionOperational Definition
Perceived Personalization [16]The advisor understands my needs.
Emotional Trust [16]I feel content about relying on this advisor for my decisions.
Trust in Competence [16]The advisor has good knowledge of the stock.
Intention to Use [16]I am willing to use this advisor as an aid to help with my decision about which stock to purchase.
Perceived Usefulness [25]The advisor gave me good suggestions.
Overall Satisfaction [25]Overall, I am satisfied with the advisor.
Information Provision [38]The advisor provides the financial knowledge needed.
" + }, + { + "type": "text", + "bbox": [ + 0.514, + 0.275, + 0.913, + 0.303 + ], + "angle": 0, + "content": "In our experiments, we use Llama-3.1 8B as the background model for all our LLM-advisor variants.\\(^4\\)" + }, + { + "type": "title", + "bbox": [ + 0.515, + 0.315, + 0.66, + 0.331 + ], + "angle": 0, + "content": "3.5 Participants" + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.333, + 0.916, + 0.555 + ], + "angle": 0, + "content": "We recruited 64 participants from the authors' affiliated university for our study: 32 participants for the personalization study and 32 participants for the advisor persona study, utilizing the university's online platform and blackboard for recruitment. Participants were required to be fluent in English, over 18 years old, and have an interest in finance and investment, mirroring the target demographic of our system's users. After excluding invalid data, 29 participants remained in the personalization study and 31 in the advisor persona study. We conducted a power analysis using the Wilcoxon signed-rank test for matched pairs, with the experimental conditions as the independent variable and users' response to the advisor assessment questionnaire as the dependent variable [29]. The analysis determined that 29 participants are needed to observe a statistically significant effect on user-perceived quality. Our recruitment criteria and compensation (£10/hour) for approximately one hour of participation were approved by our organization's ethical board." + }, + { + "type": "title", + "bbox": [ + 0.515, + 0.567, + 0.812, + 0.582 + ], + "angle": 0, + "content": "4 Evaluation Metrics and Statistics" + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.585, + 0.915, + 0.628 + ], + "angle": 0, + "content": "In this section we discuss how we quantify effectiveness for the preference elicitation and advisory discussion stages, respectively, in addition to summarizing dataset statistics for each." 
+ }, + { + "type": "title", + "bbox": [ + 0.515, + 0.639, + 0.874, + 0.656 + ], + "angle": 0, + "content": "4.1 Preference Elicitation Metrics (Stage 1)" + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.658, + 0.915, + 0.854 + ], + "angle": 0, + "content": "To evaluate the quality of the first preference elicitation stage, we want to measure how well the LLM-advisor has captured the investor preferences as defined in the investor profile \\( i \\) (see Section 3.1). Each investor profile \\( i \\in I \\) defines key features of the investor, such as preferring high-growth stocks, or favoring regular payouts, denoted \\( i^{pref} \\). We have three investor profiles (\\( |I| = 3 \\), with \\( 10(n) \\) participants performing elicitation on \\( i_u^{LLM} \\) for each profile and each LLM variant, i.e. there are 120 elicitation attempts in total, with 30 attempts per LLM-advisor variant. Following the notation in Section 3, \\( i_u^{LLM} \\) in this case denotes a similar list of features to \\( i^{pref} \\) that LLM-advisor learned about the investor during conversation with a participant \\( u \\), which we derive from a manual analysis of the elicitation output (i.e. what is produced by response summarization). Intuitively, the closer the features produced from" + }, + { + "type": "page_footnote", + "bbox": [ + 0.514, + 0.863, + 0.916, + 0.897 + ], + "angle": 0, + "content": "Further details about the LLM configuration, investor narratives, relevant scores, prompts and scripts for data analysis can be accessed at the following repository: https://github.com/TTsamurai/LLMAdvisor_supplement" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.085, + 0.076, + 0.283, + 0.088 + ], + "angle": 0, + "content": "SIGIR 2025, July 13-18, 2018, Padua, Italy" + }, + { + "type": "header", + "bbox": [ + 0.83, + 0.076, + 0.913, + 0.088 + ], + "angle": 0, + "content": "Takayanagi et al." 
+ }, + { + "type": "table_caption", + "bbox": [ + 0.084, + 0.105, + 0.482, + 0.119 + ], + "angle": 0, + "content": "Table 2: General statistics of the collected conversation data." + }, + { + "type": "table", + "bbox": [ + 0.115, + 0.131, + 0.451, + 0.319 + ], + "angle": 0, + "content": "
Participants60
Time Period2024/10/24 ~ 2024/11/7
Total Turns10,008
Stage 1: Preference Elicitation
Total Turns1,788
Number of Sessions120
Avg. Turns/Session15.8
Avg. User Words/Turn9.8
Stage 2: Advisory Discussion
Total Turns8,220
Number of Sessions480
Avg. Turns/Session18.2
Avg. User Words/Turn13.0
" + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.331, + 0.483, + 0.375 + ], + "angle": 0, + "content": "any elicitation attempt \\( i_{u}^{LLM} \\) is to \\( i^{pref} \\), the better the LLM-advisor is performing. To this end, we report elicitation accuracy for each investor profile, calculated as:" + }, + { + "type": "equation", + "bbox": [ + 0.142, + 0.38, + 0.483, + 0.425 + ], + "angle": 0, + "content": "\\[\n\\text {E l i c i t a t i o n A c c u r a c y} (i) = \\frac {1}{n} \\sum_ {j = 1} ^ {n} \\frac {\\left| i _ {j} ^ {L L M} \\cap i ^ {p r e f} \\right|}{\\left| i ^ {p r e f} \\right|} \\tag {1}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.437, + 0.483, + 0.538 + ], + "angle": 0, + "content": "Human Advisor: To provide a point of comparison, we also conduct a preference elicitation with a financial expert using the same prompt and instructions as the LLM. This allows us to evaluate how close LLMs are to a paid human advisor undertaking the same task. More specifically, for each investor profile, three participants engaged with this expert, who then produced a set of preferences \\( i_u^{Expert} \\), which can be used instead of \\( i_u^{LLM} \\) in Equation 1." + }, + { + "type": "title", + "bbox": [ + 0.084, + 0.549, + 0.45, + 0.566 + ], + "angle": 0, + "content": "4.2 Advisory Effectiveness Metrics (Stage 2)" + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.568, + 0.483, + 0.793 + ], + "angle": 0, + "content": "Ranking correlation (Spearman's Rho): In the second stage, we evaluate how well the LLM-advisor can support an investor to select financial assets that are suitable for them to invest in. Recall from Figure 3 that after a participant finishes discussing all assets with the LLM-advisor, they rank those assets \\(a \\in A_i\\) based on the likelihood they will invest in each, i.e. each participant \\(u\\) acting on a profile \\(i\\) we have an asset ranking \\(R(A_i, i_u)\\). 
As illustrated in Figure 2, each investor profile \\(i\\) was derived from a ground truth set of investor preferences \\(i^{pref}\\), which an expert used to create a ground truth ranking \\(R(A_i, i^{pref})\\), i.e. the \"correct\" ranking of assets. Intuitively the closer the \\(R(A_i, i_u)\\) is to \\(R(A_i, i^{pref})\\), the better the advisor is performing, as the participant was better able to distinguish suitable assets vs. unsuitable ones. Hence, to evaluate the effectiveness of the advisory task, we report the mean ranking correlation (Spearman's Rho) between \\(R(A_i, i_u)\\) and \\(R(A_i, i^{pref})\\) across participants \\(u\\) for each LLM-advisor." + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.799, + 0.483, + 0.897 + ], + "angle": 0, + "content": "Advisor Assessment Questionnaire: Lastly, we also gather qualitative data from each participant via a questionnaire. In particular, after ranking assets each participant, reports how they feel the LLM-advisor performed in terms of 7 dimensions, listed in Table 1, such as perceived usefulness, trust, and user satisfaction. We use this data later to evaluate how sensitive the user is to differences in the LLM-advisor." + }, + { + "type": "table_caption", + "bbox": [ + 0.514, + 0.105, + 0.916, + 0.162 + ], + "angle": 0, + "content": "Table 3: Stage 1 - Comparison of Elicitation Accuracy of an expert vs. different LLM-advisors for each investor profile. The best advisor is highlighted in bold. Arrows denote percentage increases \\((\\uparrow)\\) or decreases \\((\\downarrow)\\) compared to the expert." + }, + { + "type": "table", + "bbox": [ + 0.518, + 0.173, + 0.915, + 0.27 + ], + "angle": 0, + "content": "
Investor ProfileExpertLLM-Advisors
LLM+Extr.+Cons.Average
Growth-Oriented0.780.760.800.790.78→0.0%
Conservative-Income0.890.820.750.870.82↓7.8%
Risk-Taking0.890.480.600.550.53↓40.5%
Average0.850.690.700.730.70↓17.6%
" + }, + { + "type": "title", + "bbox": [ + 0.515, + 0.273, + 0.698, + 0.287 + ], + "angle": 0, + "content": "4.3 Dataset Statistics" + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.291, + 0.915, + 0.388 + ], + "angle": 0, + "content": "Table 2 summarizes the statistics of the data collected during the two stages of our user study. Each conversation that a participant had with an LLM-advisor in either stage 1 or 2 is referred to as a session, e.g. during Stage 1, there were 3 investor profiles * 10 participants * 4 LLM-advisors, resulting in 120 sessions. Stage 2 has 4x the number of sessions, as there are four assets associated with each profile (\\( A_i \\)) to discuss with the LLM-advisor." + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.388, + 0.915, + 0.471 + ], + "angle": 0, + "content": "From Table 2 we observe that in contrast to other conversational tasks [36, 37], financial information-seeking appears to require more extended interactions. On average, preference elicitation involves 15 turns per session with 9.8 words per turn, whereas advisory discussions involve 18 turns per session with 13.0 words per turn, highlighting the overall complexity of the task." + }, + { + "type": "title", + "bbox": [ + 0.515, + 0.482, + 0.608, + 0.497 + ], + "angle": 0, + "content": "5 Results" + }, + { + "type": "text", + "bbox": [ + 0.514, + 0.501, + 0.916, + 0.543 + ], + "angle": 0, + "content": "In this work, we explore how to design conversational financial advisors that enhance both decision-making and positive experience. To achieve this, our user study is guided by 3 core research questions." + }, + { + "type": "text", + "bbox": [ + 0.541, + 0.545, + 0.912, + 0.571 + ], + "angle": 0, + "content": "- RQ1: Can LLM-advisors effectively elicit user preferences through conversation?" 
+ }, + { + "type": "text", + "bbox": [ + 0.541, + 0.573, + 0.913, + 0.599 + ], + "angle": 0, + "content": "- RQ2: Does personalization lead to better decisions and more positive advisor assessment?" + }, + { + "type": "text", + "bbox": [ + 0.541, + 0.601, + 0.913, + 0.627 + ], + "angle": 0, + "content": "- RQ3: Do different personality traits affect decision quality and advisor assessment?" + }, + { + "type": "list", + "bbox": [ + 0.541, + 0.545, + 0.913, + 0.627 + ], + "angle": 0, + "content": null + }, + { + "type": "title", + "bbox": [ + 0.515, + 0.643, + 0.766, + 0.659 + ], + "angle": 0, + "content": "5.1 RQ1: Elicitation accuracy" + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.661, + 0.915, + 0.772 + ], + "angle": 0, + "content": "We begin by examining how effective the LLM-advisors are at identifying investment preferences during conversations in Stage 1. Elicitation Accuracy is the primary metric, where we contrast the mean accuracy across 10 sessions in comparison to a human expert tackling the same task (see Section 4.1). Table 3 reports elicitation accuracy for each LLM-advisor and the Human Expert across investment profiles. Arrows denote percentage increases \\((\\uparrow)\\) or decreases \\((\\downarrow)\\) of the LLM-advisor compared to the expert." + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.772, + 0.915, + 0.841 + ], + "angle": 0, + "content": "To set expectations, we first consider the performance of the expert in the first column in Table 3, as we might expect, the expert maintains consistently high performance across all profiles, averaging \\(85\\%\\) accuracy (random accuracy is \\(50\\%\\)). This forms an expectation of the performance ceiling for the task." + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.841, + 0.915, + 0.897 + ], + "angle": 0, + "content": "Next, we compare the expert performance to each LLM-advisor. 
From the perspective of preference elicitation, there are three LLM-advisor configurations, those that use only the Baseline Prompt (denoted LLM) from the personalization study, and those that include" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.084, + 0.076, + 0.406, + 0.088 + ], + "angle": 0, + "content": "Are Generative AI Agents Effective Personalized Financial Advisors?" + }, + { + "type": "header", + "bbox": [ + 0.716, + 0.076, + 0.913, + 0.088 + ], + "angle": 0, + "content": "SIGIR 2025, July 13-18, 2018, Padua, Italy" + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.107, + 0.482, + 0.205 + ], + "angle": 0, + "content": "a defined personality (either extroverted, \\(+\\mathrm{Extr}\\), or conscientious, \\(+\\mathrm{Cons}\\).) from the advisor persona study. From Table 3, we observe that the LLM-advisor's performance is generally strong for growth-oriented, and conservative-income investors (with accuracy around \\(80\\%\\)) on average, which is similar to the human advisor. However, for the risk-taking investor profile, the LLM-advisor's elicitation accuracy was substantially lower \\((-40.5\\%)\\)." + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.206, + 0.483, + 0.468 + ], + "angle": 0, + "content": "From a manual failure analysis, we observed the following trends that contribute to the performance gap with the human advisor, particularly for the risk-taking profile. First, it is notable that elicitation failures can originate from the investor (participant) rather than the LLM. Recall that one of the aspects that makes finance more challenging than domains like movie recommendation is that the \"user\" is inexpert, and so may give incorrect information during the conversation. Indeed, we observed cases where the participant confused concepts such as the difference between a growth and a value stock, as well as cyclical/non-cyclical assets. On the other side, preference hallucination is a core issue for the LLM-advisor. 
The LLM is a probabilistic token generator conditioned on the baseline prompt and prior conversation, and as a result, in some scenarios, the contextual content can override a statement by the investor. This type of error is more likely when the investor is unsure in their responses or when they provide contradictory statements. For instance, an investor expressing an interest in the consumer discretionary sector while simultaneously opting for non-cyclical stocks, despite consumer discretionary being inherently cyclical." + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.474, + 0.483, + 0.626 + ], + "angle": 0, + "content": "To answer RQ1, our results demonstrate that LLM-advisor's are able to elicit preferences from a user via conversation and that for 2/3's of the user profiles tested, elicitation accuracy was consistently equivalent or close to that of an expert human advisor. However, we observed a clear failure mode when testing the risk-taking profile, where misunderstandings by the investors and hallucinations within the LLM compound to result in accuracy that is close to random. Overall, we consider this a promising result, as the majority of the time it is effective, and the failure mode observed might be rectified by better context crafting and the addition of contradiction detection; both directions for future research." + }, + { + "type": "title", + "bbox": [ + 0.084, + 0.64, + 0.431, + 0.655 + ], + "angle": 0, + "content": "5.2 RQ2: Effectiveness of personalization" + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.658, + 0.483, + 0.81 + ], + "angle": 0, + "content": "Having shown that automatic preference elicitation is possible, we now examine stage 2 of our study, namely the advisory discussions. Given the inherently personalized nature of financial advice, we expect that the customer preferences obtained during stage 1 will be key to enabling LLM-advisors to provide effective investment advice. 
Hence, in this section, we compare the performance of an LLM-advisor using only the Baseline Prompt to one that includes the preferences obtained during stage 1 (+Personalized). However, as we observed that preference elicitation is not always successful, we also examine what effect elicitation performance has on the LLM-advisor." + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.819, + 0.483, + 0.86 + ], + "angle": 0, + "content": "5.2.1 Non-personalized Decision-making Effectiveness: We initially establish how effective the LLM-advisor is without any information regarding the investor. LLM-advisor effectiveness is measured" + }, + { + "type": "table_caption", + "bbox": [ + 0.514, + 0.105, + 0.916, + 0.203 + ], + "angle": 0, + "content": "Table 4: Investor decision-making effectiveness, expressed as the Spearman's Rho correlation between the investor's asset ranking and the expert asset ranking (higher is better). \\(\\dagger\\) indicates statistical improvements (Welch's t-test with \\(p<0.05\\)) over the not personalized baseline, while \\(\\S\\) indicates significant differences between cases with successful and unsuccessful preference elicitations." + }, + { + "type": "table", + "bbox": [ + 0.517, + 0.216, + 0.913, + 0.312 + ], + "angle": 0, + "content": "
Advisor ConfigInvestor vs. Expert (Spearman's Rho)
PersonalizationPersonalityAllPreference Elicitation
SuccessfulUnsuccessful
BaselineNone0.110--
+PersonalizedNone0.3100.481†§-0.228
+Personalized+Extroverted0.1220.243§-0.286
+Personalized+Conscientious0.260.365-0.025
" + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.319, + 0.915, + 0.43 + ], + "angle": 0, + "content": "based on how well the investor was able to rank the assets discussed by suitability to them. The primary metric is average Spearman's Rho correlation between the investor ranking and the ground truth ranking (see Section 4.2), reported in Table 4 row 1. As we expect, baseline advisory performance is low, with only a very weak positive correlation to the ground truth ranking of 0.11. This indicates that without further evidence, the LLM is not able to meaningfully guide the investor." + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.438, + 0.916, + 0.576 + ], + "angle": 0, + "content": "5.2.2 Personalized Decision-making Effectiveness: Having established our baseline, we now examine the impact that adding the investor preferences collected during stage 1 has, comparing Table 4 row 1 (baseline) to row 2 (personalized). As we anticipated, personalization is beneficial, with investor decision-making effectiveness increasing from 0.11 to 0.31 (average Spearman's Rho correlation to the expert ranking). However, this correlation is still weak, illustrating that while discussing assets with the LLM-advisor is better than no help at all, our participants are still struggling to evaluate the suitability of financial assets." + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.577, + 0.915, + 0.813 + ], + "angle": 0, + "content": "This correlation is an average over all the participants in the user study, regardless of how effective their preference elicitation was in stage 1. Hence, we might ask whether the low correlation is due to the LLM-advisor being confused by poor preference elicitation data. To explore this, Table 4 also reports investor decision-making effectiveness stratified based on whether stage 1 was successful (column 4) or not (column 5). 
As expected, we see a statistically significant increase in investor decision-making effectiveness when preference elicitation was successful when compared to non-personalized sessions (0.481 vs. 0.110). More concerningly, we also see the LLM-advisor has a strong negative influence on the investors' decision-making capability if preference elicitation fails, as illustrated by the negative correlations with the expert in column 5. This result highlights both that effective preference elicitation is crucial, but also that the LLM-advisor can easily influence the investor into making poor decisions, as the human is heavily reliant on the agent to navigate the relatively unfamiliar financial information space." + }, + { + "type": "text", + "bbox": [ + 0.514, + 0.82, + 0.915, + 0.862 + ], + "angle": 0, + "content": "5.2.3 Participant Assessment of the Advisor: So far we have demonstrated that there is a large difference between a non-personalized LLM-advisor and a personalized one, in terms of how they can" + }, + { + "type": "page_footnote", + "bbox": [ + 0.082, + 0.873, + 0.483, + 0.897 + ], + "angle": 0, + "content": "5Note we cannot have a personalized variant here, as the personalization evidence is derived from this stage." + }, + { + "type": "page_footnote", + "bbox": [ + 0.514, + 0.873, + 0.913, + 0.897 + ], + "angle": 0, + "content": "6We define that an elicitation session is successful if more than 50% of the investor's preferences were correctly captured" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.085, + 0.076, + 0.282, + 0.088 + ], + "angle": 0, + "content": "SIGIR 2025, July 13-18, 2018, Padua, Italy" + }, + { + "type": "header", + "bbox": [ + 0.83, + 0.076, + 0.912, + 0.088 + ], + "angle": 0, + "content": "Takayanagi et al." 
+ }, + { + "type": "table_caption", + "bbox": [ + 0.082, + 0.105, + 0.916, + 0.203 + ], + "angle": 0, + "content": "Table 5: Average participant users' response to advisor assessment questionnaire under different advisor conditions. Columns labeled with advisor condition (Baseline, +Pers., +Cons., +Extr.) contain a 7-point Likert scale (higher is better). \"p\" column contains Wilcoxon signed-rank test \\( p \\)-values for (RQ2) Baseline vs. +Personalized (Pers.), and (RQ3) +Conscientious (Cons.) vs. +Extroverted (Extr), for both the full data (All) and the subset where the elicitation accuracy is above 0.5. \"Successful Elicitation\" refers to the subset where elicitation accuracy was ≥ 0.5. For RQ2, this subset consists of pairs for which +Pers elicitation is successful, while for RQ3, it consists of pairs for which both +Extr and +Cons elicitation are successful. Boldface indicates significant effects with † for \\( p < 0.1 \\) and ‡ for \\( p < 0.05 \\)." + }, + { + "type": "table", + "bbox": [ + 0.126, + 0.208, + 0.875, + 0.347 + ], + "angle": 0, + "content": "
Response Dimension(RQ2) Baseline vs. +Personalized(RQ3) +Conscientious vs. +Extroverted
AllSuccessful ElicitationAllSuccessful Elicitation
Baseline+Pers.pBaseline+Pers.p+Cons.+Extr.p+Cons.+Extr.p
Perceived Personalization5.7595.7240.8385.7625.9050.7515.5005.5000.6635.5885.7060.941
Emotional Trust5.1035.2410.4465.1435.3330.5375.0385.1540.6004.7065.2350.034‡
Trust in Competence5.6905.6900.8175.8105.8570.7825.9626.0770.5386.0006.0001.000
Intention to Use5.3105.4830.5055.4295.7140.1664.8855.4620.005‡4.9415.5880.013‡
Perceived Usefulness5.2415.5170.1835.3815.8100.1945.4235.5380.4255.1765.1180.968
Overall Satisfaction5.3455.6900.1165.4295.8100.098†5.2695.5770.1795.1185.5290.244
Information Provision5.5175.9660.026‡5.7146.1430.053†5.6925.6540.9535.5885.7650.490
" + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.366, + 0.481, + 0.393 + ], + "angle": 0, + "content": "alter the decision-making of the investor/participant. But can the participant tell the differences between them?" + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.394, + 0.483, + 0.656 + ], + "angle": 0, + "content": "Table 5 reports the aggregation of the qualitative data we collected from each participant after they finished interacting with each LLM-advisor in terms of 7 dimensions, where we start by focusing on the RQ2-All columns, i.e. comparing the baseline and personalized variants. The important observation to note here is that the participant preference scores for both variants are statistically indistinguishable, except under the quality of information provision criteria. This means that our participants cannot tell if the LLM-advisor is personalizing to them, and trust the worse agent just as much as the better one. Furthermore, if we consider the best case scenario where the preference elicitation was successful (RQ2 Successful Elicitation columns) we observe the same pattern, even though the difference between the baseline and the personalized variants in terms of the effect it has on the participant decision-making is more pronounced. This underlines one of the core risks of using LLM-advisors in the financial domain; since our users are inherently inexpert they lack the fundamental skills to judge to what extent the LLM is providing good advice, meaning that there is no safety net if the LLM makes a mistake." + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.663, + 0.483, + 0.844 + ], + "angle": 0, + "content": "To answer RQ2, our results show that a personalized LLM-advisor is able to provide useful financial advice when it has accurate information regarding the preferences of the investor. This is demonstrated by better decision-making capability by participants using the personalized advisor in comparison to the non-personalized one. 
However, we also identified two important challenges to adoption. First, the impact the LLM-advisor has is strongly tied to the quality of the preference elicitation data provided, where poor preference elicitation will cause the agent to actively direct the investor to the wrong assets. Second, while the participants were positive regarding the LLM-advisors across all questionnaire criteria, they were not able to consistently tell the difference between good and bad advisors; leading to an increased risk of humans acting on bad advice." + }, + { + "type": "title", + "bbox": [ + 0.515, + 0.364, + 0.842, + 0.38 + ], + "angle": 0, + "content": "5.3 RQ3: Effectiveness of personalities" + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.383, + 0.916, + 0.577 + ], + "angle": 0, + "content": "Once we have confirmed the utility of personalization for LLM-advisors, we now study the effect that the personality of the advisor has on users' financial information-seeking. As previous studies have shown [32], chatbot personality can affect the way humans interact with the chatbot, and therefore affect the effectiveness and perception of LLM-advisors. To understand whether personality affects LLM financial advisors, we compare two personalized LLM-advisors on which we have injected a pre-defined personality: an extroverted personality and a conscientious personality. While we could consider the personalized LLM-advisor discussed in Section 5.2 as a third distinct personality (the base LLM personality of the LLM), we shall not compare it with our personality-injected models, because different sets of participants were used in the personalization study and the advisor-persona study." 
+ }, + { + "type": "text", + "bbox": [ + 0.513, + 0.587, + 0.915, + 0.682 + ], + "angle": 0, + "content": "5.3.1 Decision-making Effectiveness: We first examine the impact of adding personality to the advisors on the decision-making process, by measuring the capacity of the participants to correctly rank the assets (as previously done in Section 5.2). As a primary metric, we again use the average Spearman's Rho correlation between the investor ranking and the ground truth ranking reported in Table 4 rows 3 (extroverted advisor) and row 4 (conscientious advisor)." + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.684, + 0.915, + 0.821 + ], + "angle": 0, + "content": "We first observe the results for the full set of participants in the user study. Interestingly, we observe a difference between the two advisors, with the conscientious LLM-advisor providing better guidance than the extroverted one (0.26 vs. 0.122). This observation is consistent when we restrict our analysis to those cases where the preference elicitation is successful. While, expectedly, the effectiveness of both advisors improves when the elicitation is successful (0.243 vs. 0.122 in the case of the extroverted advisor and 0.365 vs. 0.26 in the case of the conscientious one), the conscientious advisor has an advantage over the extroverted one (0.365 vs. 0.26)." + }, + { + "type": "text", + "bbox": [ + 0.514, + 0.822, + 0.914, + 0.863 + ], + "angle": 0, + "content": "These results highlight that providing different personalities to an LLM-advisor can notably impact the capacity of the advisor to provide useful information to the investors." + }, + { + "type": "page_footnote", + "bbox": [ + 0.515, + 0.884, + 0.806, + 0.896 + ], + "angle": 0, + "content": "Refer to Section 3.3 for a full description of each personality." 
+ } + ], + [ + { + "type": "header", + "bbox": [ + 0.085, + 0.077, + 0.406, + 0.088 + ], + "angle": 0, + "content": "Are Generative AI Agents Effective Personalized Financial Advisors?" + }, + { + "type": "header", + "bbox": [ + 0.717, + 0.077, + 0.912, + 0.088 + ], + "angle": 0, + "content": "SIGIR 2025, July 13-18, 2018, Padua, Italy" + }, + { + "type": "text", + "bbox": [ + 0.087, + 0.108, + 0.482, + 0.19 + ], + "angle": 0, + "content": "5.3.2 Participant Assessment of the Advisor: We have observed so far that the use of different personalities affects the user decision-making process. But how do these personalities affect the perception that users have of the LLM-advisor? We observe this in Table 5, in terms of the seven dimensions captured during the advisor assessment questionnaire." + }, + { + "type": "text", + "bbox": [ + 0.087, + 0.19, + 0.482, + 0.342 + ], + "angle": 0, + "content": "We first look at the RQ3-All columns, comparing the two personalities. Notably, for the majority of the dimensions, users barely distinguish between both systems. The only answer where we observe a statistically significant difference is the intention to use the system in the future. Surprisingly, despite providing worse guidance to the investor, participants expressed a higher interest in using the extroverted advisor than the conscientious one. When we limit our study to those participants who experienced a successful preference elicitation in both advisor variants, this issue is stressed, as those users also develop a significantly greater emotional trust with the extroverted advisor." + }, + { + "type": "text", + "bbox": [ + 0.087, + 0.343, + 0.482, + 0.398 + ], + "angle": 0, + "content": "These observations are worrisome, as they reveal that the personality of a financial advisor cannot only affect the quality of the advice but also lead the investors to trust more on those systems providing worse advice." 
+ }, + { + "type": "text", + "bbox": [ + 0.087, + 0.408, + 0.482, + 0.602 + ], + "angle": 0, + "content": "5.3.3 Differences in language: To further understand how personalities affect financial advisory, we analyze the differences in the linguistic patterns provided by extroverted and conscientious advisors. Analyzing participants' reported overall experience from the exit questionnaires in the advisor persona study, over \\(20\\%\\) (7 of 31) described the extroverted advisor as clear, assertive, and cheerful while perceiving the conscientious advisor as straightforward, analytical, yet less confident. Therefore, to quantify the linguistic differences in the advisors, we conduct a financial sentiment analysis of the utterances generated by each advisor. For each utterance, we count the occurrences of positive, negative, and uncertain words from the Loughran and McDonald Financial Sentiment Dictionary [22]. We normalize these counts by the length of the sentences and average the results across all dialogues." + }, + { + "type": "text", + "bbox": [ + 0.087, + 0.603, + 0.482, + 0.81 + ], + "angle": 0, + "content": "Figure 4 shows the results, showing the extroverted sentiment scores in blue, and the conscientious scores in orange. For the three sentiment dimensions, differences between advisors are statistically significant (Welch's t-test with \\( p < 0.01 \\)). Figure 4 shows that extroverted advisors tend to use more positive language in their interactions, while conscientious advisors prefer negative and uncertain tones. Through manual analysis of the conversation, we observe that this results in the extroverted advisor focusing on the positive aspects of investments while overlooking serious drawbacks, whereas the conscientious advisor provides a more balanced view of the assets. Because of this, participants guided by conscientious advisors may make more well-informed financial decisions. 
Meanwhile, the positivity of the extroverted advisor seems more appreciated by the users, which is reflected in higher advisor assessment scores from the post-discussion questionnaire." + }, + { + "type": "text", + "bbox": [ + 0.087, + 0.817, + 0.482, + 0.858 + ], + "angle": 0, + "content": "To answer RQ3, our results show that different personalities of a personalized LLM-advisor can affect the utility of the provided advice. This is demonstrated by the better decisions of the study" + }, + { + "type": "image", + "bbox": [ + 0.548, + 0.105, + 0.88, + 0.244 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.518, + 0.261, + 0.912, + 0.314 + ], + "angle": 0, + "content": "Figure 4: Average sentiment scores by advisor personality (extroverted in light blue and conscientious in pastel orange) and category (Positive, Negative, and Uncertainty). Error bars indicate the standard deviation." + }, + { + "type": "text", + "bbox": [ + 0.518, + 0.337, + 0.913, + 0.404 + ], + "angle": 0, + "content": "participants when using an advisor with a conscientious personality than when using an advisor with an extroverted personality. Moreover, the personality of the advisor affects the perception of humans towards the system, and it has the risk of leading investors to further trust those systems that provide worse advice." + }, + { + "type": "title", + "bbox": [ + 0.518, + 0.422, + 0.638, + 0.435 + ], + "angle": 0, + "content": "6 Conclusion" + }, + { + "type": "text", + "bbox": [ + 0.518, + 0.44, + 0.912, + 0.495 + ], + "angle": 0, + "content": "In this paper, we have conducted a lab-based user study to examine how effective large language models are as financial advisors. We focus on three core challenges: preference elicitation, investment personalization, and advisor personality." 
+ }, + { + "type": "text", + "bbox": [ + 0.518, + 0.495, + 0.913, + 0.66 + ], + "angle": 0, + "content": "First, our analysis shows that LLMs are effective tools for preference elicitation through conversation. In a majority of cases, they are capable of obtaining investor's preferences with an accuracy close to or equivalent to that of an expert human advisor. However, there are some clear failure cases, as LLMs are vulnerable to contradictory statements and hallucinations, which, in the case of complex investor profiles, can decrease the accuracy of the elicitation to random levels. Although LLMs are promising for elicitation, in a complex domain like finance, investors do not always fully understand their own preferences (or they have difficulties expressing them). Therefore, future work should explore the development of LLM-advisors capable of resolving conflicting user needs." + }, + { + "type": "text", + "bbox": [ + 0.518, + 0.661, + 0.913, + 0.757 + ], + "angle": 0, + "content": "Second, personalizing LLMs to provide investment advice can improve the decisions made by the investors, but only when the personalized LLM-advisor receives accurate information about the investor's preferences. If the preference elicitation is not successful, the agent actively directs the investors to the wrong assets on which to invest. This underscores how crucial a good preference elicitation is for providing useful financial advice." + }, + { + "type": "text", + "bbox": [ + 0.518, + 0.758, + 0.913, + 0.896 + ], + "angle": 0, + "content": "Finally, our results suggest that investors are not necessarily aware of what constitutes good financial advice, and therefore, are vulnerable to acting on bad advice provided by LLMs. In the comparison between a non-personalized and a personalized LLM-advisor, although the personalized system led to better decisions, participants were unable to distinguish between the systems. 
More worryingly, when comparing two personalized advisors with extroverted and conscientious personalities, we observed that, even though the extroverted advisor provided lower-quality advice, participants trusted this advisor more than the conscientious one." + }, + { + "type": "page_footnote", + "bbox": [ + 0.086, + 0.884, + 0.411, + 0.896 + ], + "angle": 0, + "content": "8Participants were unaware of the specific personas during the study." + } + ], + [ + { + "type": "header", + "bbox": [ + 0.085, + 0.076, + 0.283, + 0.088 + ], + "angle": 0, + "content": "SIGIR 2025, July 13-18, 2018, Padua, Italy" + }, + { + "type": "header", + "bbox": [ + 0.83, + 0.076, + 0.913, + 0.088 + ], + "angle": 0, + "content": "Takayanagi et al." + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.107, + 0.483, + 0.288 + ], + "angle": 0, + "content": "Our findings highlight that, while personalized LLM-advisors represent a promising research direction, their use in high-stakes domains like finance is not free of risks: due to the limitations of LLMs at capturing complex investment preferences, and the difficulty of investors to discern whether the advice they receive truly serves their interests, LLMs have a notable risk to drive investors to bad financial assets (leading not only to a low satisfaction but also to potentially large monetary losses). However, these drawbacks open interesting research directions not only from a system perspective, but also from a human-centered approach: automated advisory development where we do not just focus on improving the quality of automated systems to guide investors, but also on how the investors will adopt, trust and interact with these AI agents [6, 20]." + }, + { + "type": "title", + "bbox": [ + 0.085, + 0.324, + 0.178, + 0.338 + ], + "angle": 0, + "content": "References" + }, + { + "type": "ref_text", + "bbox": [ + 0.092, + 0.34, + 0.483, + 0.362 + ], + "angle": 0, + "content": "[1] James E. Allen, Curry I. Guinn, and Eric Horvitz. 1999. 
Mixed-initiative interaction. IEEE Intelligent Systems and their Applications 14, 5 (1999), 14-23." + }, + { + "type": "ref_text", + "bbox": [ + 0.092, + 0.362, + 0.483, + 0.402 + ], + "angle": 0, + "content": "[2] Ashay Argal, Siddharth Gupta, Ajay Modi, Pratik Pandey, Simon Shim, and Chang Choo. 2018. Intelligent travel chatbot for predictive recommendation in echo platform. In 2018 IEEE 8th Annual Computing and Communication Workshop and Conference (CCWC 2018). IEEE, 176-183." + }, + { + "type": "ref_text", + "bbox": [ + 0.092, + 0.402, + 0.482, + 0.442 + ], + "angle": 0, + "content": "[3] Andreas Bucher, Mateusz Dolata, Sven Eckhardt, Dario Staehelin, and Gerhard Schwabe. 2024. Talking to Multi-Party Conversational Agents in Advisory Services: Command-based vs. Conversational Interactions. Proceedings of the ACM on Human-Computer Interaction 8, GROUP (2024)." + }, + { + "type": "ref_text", + "bbox": [ + 0.092, + 0.443, + 0.482, + 0.483 + ], + "angle": 0, + "content": "[4] Wanling Cai, Yucheng Jin, and Li Chen. 2022. Impacts of personal characteristics on user trust in conversational recommender systems. In Proceedings of the 2022 CHI Conference on Human Factors in Computing Systems (CHI 2022). Article 489, 14 pages." + }, + { + "type": "ref_text", + "bbox": [ + 0.092, + 0.483, + 0.482, + 0.503 + ], + "angle": 0, + "content": "[5] Gary Charness, Uri Gneezy, and Alex Imas. 2013. Experimental methods: Eliciting risk preferences. Journal of Economic Behavior & Organization 87 (2013), 43-51." + }, + { + "type": "ref_text", + "bbox": [ + 0.092, + 0.503, + 0.482, + 0.523 + ], + "angle": 0, + "content": "[6] Erin K. Chiou and John D. Lee. 2023. Trusting automation: Designing for responsivity and resilience. Human factors 65, 1 (2023), 137-165." + }, + { + "type": "ref_text", + "bbox": [ + 0.092, + 0.523, + 0.482, + 0.563 + ], + "angle": 0, + "content": "[7] Konstantina Christakopoulou, Filip Radlinski, and Katja Hofmann. 2016. 
Towards conversational recommender systems. In Proceedings of the 22nd ACM SIGKDD international conference on knowledge discovery and data mining (KDD 2016). 815-824." + }, + { + "type": "ref_text", + "bbox": [ + 0.092, + 0.563, + 0.482, + 0.593 + ], + "angle": 0, + "content": "[8] Berardina De Carolis, Marco de Gemmis, Pasquale Lops, and Giuseppe Palestra. 2017. Recognizing users feedback from non-verbal communicative acts in conversational recommender systems. Pattern Recognition Letters 99 (2017), 87-95." + }, + { + "type": "ref_text", + "bbox": [ + 0.092, + 0.593, + 0.482, + 0.633 + ], + "angle": 0, + "content": "[9] Mateusz Dolata, Doris Agotai, Simon Schubiger, and Gerhard Schwabe. 2019. Pen-and-paper Rituals in Service Interaction: Combining High-touch and High-tech in Financial Advisory Encounters. Proceedings of the ACM on Human-Computer Interaction 3, CSCW, Article 224 (2019)." + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.633, + 0.483, + 0.654 + ], + "angle": 0, + "content": "[10] Eugene F Fama and Kenneth R French. 1998. Value versus growth: The international evidence. The journal of finance 53, 6 (1998), 1975-1999." + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.654, + 0.482, + 0.693 + ], + "angle": 0, + "content": "[11] Christian Hildebrand and Anouk Bergner. 2021. Conversational robo advisors as surrogates of trust: onboarding experience, firm perception, and consumer financial decision making. Journal of the Academy of Marketing Science 49, 4 (2021), 659-676." + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.693, + 0.482, + 0.714 + ], + "angle": 0, + "content": "[12] Dietmar Jannach, Ahtsham Manzoor, Wanling Cai, and Li Chen. 2021. A survey on conversational recommender systems. Comput. Surveys 54, 5 (2021), 1-36." + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.714, + 0.482, + 0.754 + ], + "angle": 0, + "content": "[13] Guangyuan Jiang, Manjie Xu, Song-Chun Zhu, Wenjuan Han, Chi Zhang, and Yixin Zhu. 2024. 
Evaluating and inducing personality in pre-trained language models. In Proceedings of the 37th Conference on Neural Information Processing Systems (NeurIPS 2023)." + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.755, + 0.482, + 0.793 + ], + "angle": 0, + "content": "[14] Hang Jiang, Xiajie Zhang, Xubo Cao, Cynthia Breazeal, Deb Roy, and Jad Kabbara. 2024. PersonalLLM: Investigating the Ability of Large Language Models to Express Personality Traits. In Findings of the Association for Computational Linguistics: NAACL 2024. 3605-3627." + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.794, + 0.482, + 0.835 + ], + "angle": 0, + "content": "[15] Francis M. Kinniry Jr., Colleen M. Jaconetti, Michael A. DijJoseph, Yan Zilbering, Donald G. Bennyhoff, and Georgina Yarwood. 2020. Putting a value on your value: Quantifying Vanguard Adviser's Alpha in the UK. Technical Report. The Vanguard Group, Valley Forge, Pennsylvania, USA." + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.835, + 0.482, + 0.865 + ], + "angle": 0, + "content": "[16] Sherrie Y.X. Komiak and Izak Benbasat. 2006. The effects of personalization and familiarity on trust and adoption of recommendation agents. MIS quarterly (2006), 941-960." + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.865, + 0.482, + 0.896 + ], + "angle": 0, + "content": "[17] Ivica Kostric, Krisztian Balog, and Filip Radlinski. 2021. Soliciting user preferences in conversational recommender systems via usage-related questions. In Proceedings of the 15th ACM Conference on Recommender Systems. 724-729." + }, + { + "type": "list", + "bbox": [ + 0.087, + 0.34, + 0.483, + 0.896 + ], + "angle": 0, + "content": null + }, + { + "type": "ref_text", + "bbox": [ + 0.518, + 0.109, + 0.914, + 0.15 + ], + "angle": 0, + "content": "[18] Kausik Lakkaraju, Sara E. Jones, Sai Krishna Revanth Vuruma, Vishal Pallagani, Bharath C. Muppasani, and Biplav Srivastava. 2023. 
LLMs for Financial Advise-ment: A Fairness and Efficacy Study in Personal Decision Making. In Proceedings of the 4th ACM Conference on AI in Finance (ICAIF 2023). 100-107." + }, + { + "type": "ref_text", + "bbox": [ + 0.518, + 0.15, + 0.914, + 0.18 + ], + "angle": 0, + "content": "[19] Cong Li. 2016. When does web-based personalization really work? The distinction between actual personalization and perceived personalization. Computers in human behavior 54 (2016), 25-33." + }, + { + "type": "ref_text", + "bbox": [ + 0.518, + 0.18, + 0.914, + 0.22 + ], + "angle": 0, + "content": "[20] Zhuoyan Li, Zhuoran Lu, and Ming Yin. 2023. Modeling human trust and reliance in AI-assisted decision making: a markovian approach. In Proceedings of the 37th AAAI Conference on Artificial Intelligence (AAAI 2023/IAAI 2023/EAAI 2023). Article 679." + }, + { + "type": "ref_text", + "bbox": [ + 0.518, + 0.221, + 0.914, + 0.251 + ], + "angle": 0, + "content": "[21] Andrew W. Lo and Jillian Ross. 2024. Can ChatGPT Plan Your Retirement?: Generative AI and Financial Advice. Harvard Data Science Review (2024). Issue Special Issue 5." + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.251, + 0.914, + 0.271 + ], + "angle": 0, + "content": "[22] Tim Loughran and Bill McDonald. 2011. When is a liability not a liability? Textual analysis, dictionaries, and 10-Ks. The Journal of finance 66, 1 (2011), 35-65." + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.271, + 0.914, + 0.291 + ], + "angle": 0, + "content": "[23] Robert R. McCrae and Oliver P. John. 1992. An introduction to the five-factor model and its applications. Journal of personality 60 2 (1992), 175-215." + }, + { + "type": "ref_text", + "bbox": [ + 0.518, + 0.291, + 0.914, + 0.331 + ], + "angle": 0, + "content": "[24] Sourav Medya, Mohammad Rasoolinejad, Yang Yang, and Brian Uzzi. 2022. An Exploratory Study of Stock Price Movements from Earnings Calls. In Companion Proceedings of the Web Conference 2022 (WWW 2022). 
Association for Computing Machinery, 20-31." + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.331, + 0.914, + 0.361 + ], + "angle": 0, + "content": "[25] Pearl Pu, Li Chen, and Rong Hu. 2011. A user-centric evaluation framework for recommender systems. In Proceedings of the 5th ACM conference on Recommender Systems (RecSys 2011). 157-164." + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.361, + 0.914, + 0.402 + ], + "angle": 0, + "content": "[26] Filip Radlinski, Krisztian Balog, Bill Byrne, and Karthik Krishnamoorthi. 2019. Coached conversational preference elicitation: A case study in understanding movie preferences. In Proceedings of the 20th Annual SIGdial Meeting on Discourse and Dialogue (SIGDIAL 2019). 353-360." + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.402, + 0.914, + 0.432 + ], + "angle": 0, + "content": "[27] Filip Radlinski and Nick Craswell. 2017. A theoretical framework for conversational search. In Proceedings of the 2nd Conference on Human Information Interaction and Retrieval (CHIIR 2017). 117-126." + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.432, + 0.914, + 0.483 + ], + "angle": 0, + "content": "[28] Oscar Sainz, Jon Campos, Iker Garcia-Ferrero, Julien Etxaniz, Oier Lopez de Lacalle, and Eneko Agirre. 2023. NLP Evaluation in trouble: On the Need to Measure LLM Data Contamination for each Benchmark. In Findings of the Association for Computational Linguistics: EMNLP 2023, Houda Bouamor, Juan Pino, and Kalika Bali (Eds.). Association for Computational Linguistics, 10776-10787." + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.483, + 0.914, + 0.502 + ], + "angle": 0, + "content": "[29] Tetsuya Sakai. 2018. Laboratory experiments in information retrieval. The information retrieval series 40 (2018), 4." + }, + { + "type": "ref_text", + "bbox": [ + 0.518, + 0.503, + 0.914, + 0.543 + ], + "angle": 0, + "content": "[30] Javier Sanz-Cruzado, Edward Richards, and Richard McCreadie. 2024. 
FAR-AI: A Modular Platform for Investment Recommendation in the Financial Domain. In Proceedings of the 46th European Conference on Information Retrieval (ECIR 2024), Part V. Springer-Verlag, Glasgow, United Kingdom, 267-271." + }, + { + "type": "ref_text", + "bbox": [ + 0.518, + 0.543, + 0.914, + 0.583 + ], + "angle": 0, + "content": "[31] Yunfan Shao, Linyang Li, Junqi Dai, and Xipeng Qiu. 2023. Character-LLM: A Trainable Agent for Role-Playing. In Proceedings of the 2023 Conference on Empirical Methods in Natural Language Processing (EMNLP 2023). Association for Computational Linguistics, 13153-13187." + }, + { + "type": "ref_text", + "bbox": [ + 0.518, + 0.583, + 0.914, + 0.613 + ], + "angle": 0, + "content": "[32] Tuva Lunde Smestad and Frode Volden. 2019. Chatbot personalities matters: improving the user experience of chatbot interfaces. In 5th International Conference Internet Science: (INSCI 2018). Springer, 170-181." + }, + { + "type": "ref_text", + "bbox": [ + 0.518, + 0.613, + 0.914, + 0.633 + ], + "angle": 0, + "content": "[33] David J Streich. 2023. Risk preference elicitation and financial advice taking. Journal of Behavioral Finance 24, 3 (2023), 259-275." + }, + { + "type": "ref_text", + "bbox": [ + 0.518, + 0.633, + 0.914, + 0.663 + ], + "angle": 0, + "content": "[34] Yueming Sun and Yi Zhang. 2018. Conversational recommender system. In Proceedings of the 41th International ACM SIGIR Conference on Research and Development in Information Retrieval (SIGIR 2018), 235-244." + }, + { + "type": "ref_text", + "bbox": [ + 0.518, + 0.663, + 0.914, + 0.714 + ], + "angle": 0, + "content": "[35] Takehiro Takayanagi, Kiyoshi Izumi, Atsuo Kato, Naoyuki Tsunedomi, and Yukina Abe. 2023. Personalized Stock Recommendation with Investors' Attention and Contextual Information. In Proceedings of the 46th International ACM SIGIR Conference on Research and Development in Information Retrieval (SIGIR 2023). Association for Computing Machinery, 3339-3343." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.518, + 0.714, + 0.914, + 0.764 + ], + "angle": 0, + "content": "[36] Johanne R. Trippas, Sara Fahad Dawood Al Lawati, Joel Mackenzie, and Luke Gallagher. 2024. What do Users Really Ask Large Language Models? An Initial Log Analysis of Google Bard Interactions in the Wild. In Proceedings of the 47th International ACM SIGIR Conference on Research and Development in Information Retrieval (SIGIR 2024). 2703-2707." + }, + { + "type": "ref_text", + "bbox": [ + 0.518, + 0.764, + 0.914, + 0.814 + ], + "angle": 0, + "content": "[37] Johanne R. Trippas, Luke Gallagher, and Joel Mackenzie. 2024. Re-evaluating the Command-and-Control Paradigm in Conversational Search Interactions. In Proceedings of the 33rd ACM International Conference on Information and Knowledge Management (CIKM 2024). Association for Computing Machinery, 2260-2270." + }, + { + "type": "ref_text", + "bbox": [ + 0.518, + 0.814, + 0.914, + 0.845 + ], + "angle": 0, + "content": "[38] Patchara Vanichvasin. 2021. Chatbot Development as a Digital Learning Tool to Increase Students' Research Knowledge. International Education Studies 14, 2 (2021), 44-53." + }, + { + "type": "ref_text", + "bbox": [ + 0.518, + 0.845, + 0.914, + 0.874 + ], + "angle": 0, + "content": "[39] Xuena Wang, Xueting Li, Zi Yin, Yue Wu, and Jia Liu. 2023. Emotional intelligence of large language models. Journal of Pacific Rim Psychology 17 (2023), 18344909231213958." + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.874, + 0.914, + 0.896 + ], + "angle": 0, + "content": "[40] Pontus Wärnestäl. 2005. User evaluation of a conversational recommender system. 
In Proceedings of the 4th Workshop on Knowledge and Reasoning in Practical" + }, + { + "type": "list", + "bbox": [ + 0.517, + 0.109, + 0.914, + 0.896 + ], + "angle": 0, + "content": null + } + ], + [ + { + "type": "header", + "bbox": [ + 0.084, + 0.076, + 0.406, + 0.087 + ], + "angle": 0, + "content": "Are Generative AI Agents Effective Personalized Financial Advisors?" + }, + { + "type": "header", + "bbox": [ + 0.716, + 0.076, + 0.913, + 0.088 + ], + "angle": 0, + "content": "SIGIR 2025, July 13-18, 2018, Padua, Italy" + }, + { + "type": "ref_text", + "bbox": [ + 0.11, + 0.109, + 0.195, + 0.12 + ], + "angle": 0, + "content": "Dialogue Systems." + }, + { + "type": "ref_text", + "bbox": [ + 0.086, + 0.12, + 0.484, + 0.15 + ], + "angle": 0, + "content": "[41] Hamed Zamani, Johanne R Trippas, Jeff Dalton, Filip Radlinski, et al. 2023. Conversational information seeking. Foundations and Trends in Information Retrieval 17, 3-4 (2023), 244-456." + }, + { + "type": "ref_text", + "bbox": [ + 0.085, + 0.15, + 0.484, + 0.181 + ], + "angle": 0, + "content": "[42] Markus Zanker, Laurens Rook, and Dietmar Jannach. 2019. Measuring the impact of online personalisation: Past, present and future. International Journal of Human-Computer Studies 131 (2019), 160–168." + }, + { + "type": "ref_text", + "bbox": [ + 0.086, + 0.181, + 0.484, + 0.222 + ], + "angle": 0, + "content": "[43] Yongfeng Zhang, Xu Chen, Qingyao Ai, Liu Yang, and W Bruce Croft. 2018. Towards conversational search and recommendation: System ask, user respond. In Proceedings of the 27th ACM International Conference on Information and Knowledge Management (CIKM 2018). 177-186." 
+ }, + { + "type": "list", + "bbox": [ + 0.085, + 0.109, + 0.484, + 0.222 + ], + "angle": 0, + "content": null + }, + { + "type": "ref_text", + "bbox": [ + 0.518, + 0.109, + 0.914, + 0.15 + ], + "angle": 0, + "content": "[44] Huaqin Zhao, Zhengliang Liu, Zihao Wu, Yiwei Li, Tianze Yang, Peng Shu, Shaochen Xu, Haixing Dai, Lin Zhao, Gengchen Mai, et al. 2024. Revolutionizing Finance with LLMs: An Overview of Applications and Insights. arXiv preprint arXiv:2401.11641 (2024)." + }, + { + "type": "ref_text", + "bbox": [ + 0.518, + 0.15, + 0.915, + 0.181 + ], + "angle": 0, + "content": "[45] Dávid Zibriczky. 2016. Recommender systems meet finance: a literature review. In Proceedings of the 2nd International Workshop on Personalization & Recommender Systems in Financial Services (FinRec 2016). 1-10." + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.181, + 0.915, + 0.212 + ], + "angle": 0, + "content": "[46] Liv Ziegfeld, Daan Di Scala, and Anita HM Cremers. 2025. The effect of preference elicitation methods on the user experience in conversational recommender systems. Computer Speech & Language 89 (2025), 101696." 
+ }, + { + "type": "list", + "bbox": [ + 0.517, + 0.109, + 0.915, + 0.212 + ], + "angle": 0, + "content": null + } + ] +] \ No newline at end of file diff --git a/data/2025/2504_05xxx/2504.05862/f5f20bac-767d-4260-82e6-943416d1d631_origin.pdf b/data/2025/2504_05xxx/2504.05862/f5f20bac-767d-4260-82e6-943416d1d631_origin.pdf new file mode 100644 index 0000000000000000000000000000000000000000..930acbeaaaa43611c2b838399ee35c8de2f72a0c --- /dev/null +++ b/data/2025/2504_05xxx/2504.05862/f5f20bac-767d-4260-82e6-943416d1d631_origin.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:cdaf4fa05d7465a7e367fcfbd9ec22fb51e20f4cec8c4f9943c21e7f83ef5270 +size 945776 diff --git a/data/2025/2504_05xxx/2504.05862/full.md b/data/2025/2504_05xxx/2504.05862/full.md new file mode 100644 index 0000000000000000000000000000000000000000..91ab0820d468b744d58f8a4335fb8852c3b1d849 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05862/full.md @@ -0,0 +1,377 @@ +# Are Generative AI Agents Effective Personalized Financial Advisors? + +Takehiro Takayanagi + +takayanagi-takehiro590@g.ecc.u- + +tokyo.ac.jp + +The University of Tokyo + +Tokyo, Japan + +Kiyoshi Izumi + +izumi@sys.t.u-tokyo.ac.jp + +The University of Tokyo + +Tokyo, Japan + +Javier Sanz-Cruzado + +javier.sanz- + +cruzadopuig@glasgow.ac.uk + +University of Glasgow + +Glasgow, United Kingdom + +Richard McCreadie + +richard.mccreadie@glasgow.ac.uk + +University of Glasgow + +Glasgow, United Kingdom + +# Abstract + +Large language model-based agents are becoming increasingly popular as a low-cost mechanism to provide personalized, conversational advice, and have demonstrated impressive capabilities in relatively simple scenarios, such as movie recommendations. But how do these agents perform in complex high-stakes domains, where domain expertise is essential and mistakes carry substantial risk? 
This paper investigates the effectiveness of LLM-advisors in the finance domain, focusing on three distinct challenges: (1) eliciting user preferences when users themselves may be unsure of their needs, (2) providing personalized guidance for diverse investment preferences, and (3) leveraging advisor personality to build relationships and foster trust. Via a lab-based user study with 64 participants, we show that LLM-advisors often match human advisor performance when eliciting preferences, although they can struggle to resolve conflicting user needs. When providing personalized advice, the LLM was able to positively influence user behavior, but demonstrated clear failure modes. Our results show that accurate preference elicitation is key, otherwise, the LLM-advisor has little impact, or can even direct the investor toward unsuitable assets. More worryingly, users appear insensitive to the quality of advice being given, or worse these can have an inverse relationship. Indeed, users reported a preference for and increased satisfaction as well as emotional trust with LLMs adopting an extroverted persona, even though those agents provided worse advice. + +# CCS Concepts + +- Information systems $\rightarrow$ Decision support systems; Personalization. + +# Keywords + +large language models, financial advisor, user study, generative AI + +Permission to make digital or hard copies of all or part of this work for personal or classroom use is granted without fee provided that copies are not made or distributed for profit or commercial advantage and that copies bear this notice and the full citation on the first page. Copyrights for components of this work owned by others than the author(s) must be honored. Abstracting with credit is permitted. To copy otherwise, or republish, to post on servers or to redistribute to lists, requires prior specific permission and/or a fee. Request permissions from permissions@acm.org. 
+ +SIGIR 2025, July 13-18, 2018, Padua, Italy + +© 2025 Copyright held by the owner/author(s). Publication rights licensed to ACM. + +ACM ISBN 978-x-xxxxx-xxxxx-x/YY/MM + +https://doi.org/10.1145/nnnnnnn.nnnnnnn + +Iadh Ounis + +iadh.ounis@glasgow.ac.uk + +University of Glasgow + +Glasgow, United Kingdom + +![](images/4cee9a16e36b20686fedd91c3c5c33e55f7ed2b3b09d0c248c6e09e369f3f9f7.jpg) +Figure 1: Conceptual illustration of an LLM-advisor with two stages: (1) Preference Elicitation and (2) Advisory Discussion. + +# ACM Reference Format: + +Takehiro Takayanagi, Kiyoshi Izumi, Javier Sanz-Cruzado, Richard McCreadie, and Iadh Ounis. 2025. Are Generative AI Agents Effective Personalized Financial Advisors?. In Proceedings of SIGIR 2025. ACM, New York, NY, USA, 11 pages. https://doi.org/10.1145/nnnnnnn.nnnnnnn + +# 1 Introduction + +Personalized advice plays a crucial role in our society, particularly in complex and high-stakes domains like healthcare and finance. Advisors and professionals in these fields use their expertise to offer personalized guidance and emotional support to their clients, leveraging people's specific preferences and/or circumstances. However, advisory services are often provided at a high cost, effectively excluding a large portion of the population from this critical advice. In the financial domain, to mitigate this issue, automated decision support systems have been widely studied, with a special focus on investment-related predictions, such as financial asset recommendations [30, 35]. + +Recent advances in natural language processing and large language models (LLMs) have significantly accelerated the development of conversational agents, presenting the potential to function as personalized assistants for information-seeking and decision-making [41]. These agents can now leverage multi-turn dialogues, enabling dynamic, mixed-initiative interactions where both users and systems can take the lead in conversations [1]. 
This progression has expanded the application of conversational agents to various tasks, such as recommendation, question answering, and search [12, 27, 34, 41]. + +The application of these conversational agents for financial decision-making represents a much more complex scenario than others like movie recommendations, because users are not necessarily familiar with the basic terminology and concepts in this space, and mistakes carry a substantial risk that can lead to large monetary losses. While there is a growing interest in building these conversational assistants to provide automated financial advice [21], previous work has mostly targeted agents capable of handling simple inquiries [18, 36, 37]. Compared to these simple systems, helping users navigate financial decisions and market uncertainties poses a much greater challenge. Therefore, it is not yet clear how to develop systems that effectively support complex financial information-seeking and decision-making tasks. + +This work aims to close this gap by exploring the effectiveness of LLMs to act as personalized financial advisory agents. In particular, we focus on three problems: (a) eliciting investor preferences through interactive conversations, (b) providing personalized guidance to help users determine whether particular financial assets align with their preferences, and (c) leveraging the personality of the advisor to foster trust on the advisor. + +First, the financial literature emphasizes that eliciting user preferences is central to delivering suitable advice [33]. However, it remains unclear whether current conversational technologies, particularly those powered by LLMs, can correctly elicit user preferences in specialized domains where users struggle to articulate their needs. Our work addresses this challenge in the context of financial services. 
+ +Second, although personalization is widely regarded as important in the financial decision-support literature [30, 35], its value in a conversational setting remains uncertain. In particular, we explore whether tailoring dialogue around a user's profile and context improves financial decision-making. Additionally, we also explore how personalization influences user perceptions of the advisor, in terms of aspects like trust and satisfaction. + +Finally, in personalized advisory settings within high-stakes domains, the relationship and trust between the client and advisor play a crucial role [21]. Research on conversational agents suggests that agent personality significantly affects users' perceptions of the system [4, 32]. However, it remains unclear how an advisor's personality in the financial domain influences both the quality of users' financial decisions and their overall experience. + +To summarize, in this paper, we explore the following questions: + +- RQ1: Can LLM-advisors effectively elicit user preferences through conversation? +- RQ2: Does personalization lead to better investment decisions and a more positive advisor assessment? +- RQ3: Do different personality traits affect decision quality and advisor assessment? + +To address these questions, we conduct a lab-based user study that explores the effectiveness of LLMs as interactive conversational financial advisors, on which we simulate realistic investment scenarios using investor narratives and stock relevance scores curated by financial experts. Figure 1 illustrates an example conversation with the advisor, divided into two stages: first, the LLM-advisor attempts to capture the investor preferences through conversation; in + +the second stage, given an individual asset, the advisor provides information about it to the investor, including how the asset matches (or not) the investor's preferences. 
To answer the different questions, we compare different configurations of the LLM-advisor: first, we compare personalized vs. non-personalized advisors, and, then, we compare two personalized advisors with distinct personalities. + +# 2 Related Work + +# 2.1 Personalization and Preference Elicitation + +Information systems, especially those focused on search and recommendation benefit from personalization [16]. Specifically, personalization techniques play a crucial role in enhancing user experience [19, 25, 42]. Interactive approaches, such as conversational preference elicitation represent the frontier of personalization. This problem has received growing attention, as advances in generative AI now provide a functional mechanism to collect user preferences dynamically in a free-form manner [41]. This interactive approach can capture more diverse and targeted insights than static approaches like questionnaires [7, 12, 26, 27, 34]. Indeed, recent studies have proposed various methods for effective conversational preference elicitation [34, 43], as well as user studies on the perceived quality of this process in domains such as e-commerce, movies, fashion, books, travel, and restaurant recommendations [2, 8, 17, 26, 34, 46]. + +However, we argue that for some important domains, trying to directly collect preferences is insufficient. An implicit assumption of these studies is that if directly asked, the user will be able to accurately express their preferences. It is reasonable to expect that this assumption would hold for scenarios like movie recommendation; we can ask a user "do you like horror movies?" and expect a useful response. On the other hand, this will not hold for complex tasks, where the user lacks the knowledge to form an accurate response [12, 40]. For instance, in an investment context if we asked "do you prefer ETFs or Bonds?", it is not clear that an inexperienced user would be able to produce a meaningful answer. 
In these cases, an ideal agent needs to fill the gaps in the user knowledge through conversation, as well as infer the user preferences across multiple (often uncertain) user responses. But how effective are generative AI agents at this complex task? This paper aims to answer that question for the domain of financial advisory; a particularly challenging domain given its technical nature and high risks if done poorly. + +# 2.2 Financial advisory + +In the financial domain, advisors help individuals manage their personal finances by offering guidance on investments and assisting with decision-making. While financial advisors can be beneficial, their services often come at a high cost, making them unaffordable for many people. To mitigate this issue, automated (nonconversational) financial decision support systems such as financial recommender systems have been widely studied [45]. The majority of research in this area has been focused on how to find profitable assets (i.e. those that will make money if we invest in them). These works assume a simplified user-model, where an investor is only concerned with maximizing return-on-investment over a fixed period of time [30, 35]. These studies frame financial advisory as a ranking problem, where the goal is to rank financial assets for a user + +![](images/386ffde58c6627098c75cf4e90481d55e10c3880e9b1334aba927ea8a1c86fd4.jpg) +Figure 2: Example of an investor profile, investment preferences, and ground truth ranking. Dashed line components are used for evaluation (and therefore, they are not shown to the user/LLM). + +over a specified time period. However, a recent study suggests that a large part of the value offered by human financial advisors stems from their ability to personalize investment guidance to clients' specific needs, build relationships, and foster trust [15], rather than simply presenting suitable assets. 
+ +Reflecting on these findings, the development of conversational financial advisors has drawn increasing attention, as it enables a dynamic understanding of users' needs, personalized guidance, and the potential to build trustworthy relationships [3, 9, 11, 18, 44]. In particular, the conversational agents' personality has gained attention as a factor that can help build relationships with clients and foster trust [21], especially given the successes of conversational agents using the Big Five personality model [23] to enhance the end-user experience [5, 33]. Although conversational agents show potential in finance, how to configure them to match the value of human advisors remains unclear. Therefore, we conduct a user study to examine how personalizing investment guidance and the advisor's personality shape users' financial decision-making effectiveness and overall user experience. + +# 3 Methodology + +In this paper we aim to determine to what extent current generative language models can act as an effective financial advisor. Indeed, given the need to personalize for the user, emotional implications, the technical nature of the information-seeking task, and high impact if failed, we argue that this is an excellent test case for the limits of generative large language models. To structure our evaluation, we divide our study into two phases, as illustrated in Figure 1, where we evaluate the success of both: + +(1) Preference Elicitation: During this stage, we have the LLM-advisor hold a natural language conversation with a human, where it is directed to collect information regarding the person's investment preferences. The human in this interaction is pretending to have preferences from a given investor profile. 
+(2) Advisory Discussion: During the advisory discussion, the LLM-advisor again has a natural language conversation with the human (acting on an investor profile), where the human collects information about whether a company is a suitable investment for them. This is repeated for multiple companies per investor profile. + +We provide preparatory information and discuss each stage in more detail below: + +# 3.1 Investor Profiles + +To fairly evaluate the ability of any LLM-advisor, we need to have them interact with human users with real needs. Given the open-ended nature of free-form conversations, it is desirable to repeat each experiment with different people such that we can observe variances in conversation paths, as those variances may influence task success. However, to enable repeatability, we need to hold the investor needs constant across repetitions. Hence, we define three archetypal investor profiles $i \in I$ based on input from a financial expert, where our human participants are given one to follow when conversing with the LLM-advisor: + +- Investor 1: Growth-Oriented Healthcare Enthusiast: Prefers healthcare innovations, values high-growth opportunities, and takes measured risks. +- Investor 2: Conservative Income Seeker: Seeks stable returns, invests in well-established companies, values regular dividend payouts. +- Investor 3: Risk-taking Value Investor: Targets undervalued companies with strong long-term potential, tolerates short-term volatility, and invests in cyclical sectors. + +For each of these investor profiles, we select three key investment preferences, chosen from well-known investment characteristics such as industry sector, stock style, consistency in dividend payments, and sensitivity to global market changes [10]. We denote the set of investor preferences as $i^{pref}$ . In our experiments, we simulate a realistic elicitation scenario where the advisor collects the preferences from the participants. 
Therefore, we do not straightforwardly provide the preferences to the participants. Instead, we present them as text narratives of between 150 to 200 words. A financial expert was consulted to confirm the quality and reliability of these narratives. An example narrative representing Investor 2 is illustrated in Figure 2, where we highlight the sentences referring to specific investor preferences. + +# 3.2 Stage 1: Preference Elicitation + +The goal of stage 1 of our study is to determine to what extent an LLM-advisor can effectively collect a user's investment preferences through conversation. Formally, given a participant of the user study $u$ and an investor profile $i$ , during the elicitation stage, the LLM-advisor aims to obtain an approximated set of preferences, denoted $i_u^{LLM}$ , that matches the investor preferences ( $i^{pref}$ ). To achieve this, the generative model produces a series of questions that participants answer by interpreting the investor narrative. + +Responses to those questions, denoted as $R_{i}^{u}$ , are used by the LLM-advisor to generate the user profile $i_{u}^{LLM}$ . Success is then measured by manually evaluating the overlap between $i^{pref}$ and $i_{u}^{LLM}$ . + +For user elicitation, we adopted a System-Ask-User-Respond (SAUR) paradigm [43]. During the conversation, the advisor proactively inquires about the user's preferences given a set of target preferences (e.g., industry type, acceptable risk). After the human participant responds to a question, the LLM-advisor checks whether the collected preferences cover all of the target preferences. If the advisor is confident that they do, it ends the conversation and prompts the user to proceed to the next stage; otherwise, it continues asking follow-up questions in a loop. + +# 3.3 Stage 2: Advisory Discussion + +Stage 2 of our study investigates to what extent an LLM-advisor can provide the same benefits as a real human advisor when exploring investment options. 
Note that the goal here is not to have the LLM-advisor promote any one asset, but rather to provide accurate and meaningful information such that the human can find the best investment opportunity for them. To this end, we structure our experiment such that the human (acting on an investor profile) has one conversation with the LLM-advisor for each of a set of assets being considered.1 After all assets are presented to the participant, a stock ranking is generated by sorting the stocks by the participant rating in descending order. + +Importantly, as we know the investor profile $i^{pref}$ for each conversation about an asset $a$ , we can objectively determine whether $a$ is a good investment given $i^{pref}$ , forming a ground truth against which we can compare to the rating provided by our human participant after their conversation with the LLM-advisor. For each asset $a$ , a financial expert produced a score between 0 and 3 by manually checking whether $a$ satisfied each of the three investment criteria contained in $i^{pref}$ . A ground-truth ranking was produced by sorting the assets by the expert scores. We show an example of the ranking construction in Figure 2. During evaluation, the closer the participant ranking is to the ranking produced by expert judgments, the better the LLM-advisor performed. + +Baseline Prompt: As we are working with an LLM-advisor and the nature of financial information-seeking is time-sensitive, we need to provide any information that might change over time to the LLM within the prompt. As such, for each asset $a$ , we pre-prepared a standard asset descriptor block after consulting with a financial expert, containing: + +Stock Prices: We collect monthly stock prices from 2023 using Yahoo! Finance.2 +- Business Summary: We gather each company's business overview from Yahoo! Finance. 
+- Recent Performance and Key Financial Indicators (e.g., EPS): We obtain earnings conference call transcripts3 from Seeking Alpha for the last quarter of 2023. + +![](images/fd66e0a7cc774da4caaecdf2f74a7c28e16bf0343f962b2806eee36f86cbf795.jpg) +Figure 3: User study structure. + +The advisor using this prompt acts as our baseline for the advisory discussion study. We augment this baseline with additional context and instructions to form two additional experimental scenarios, discussed below: + ++Personalization: As discussed earlier, one of the core roles of the financial advisor is to personalize to the individual customer, based on their financial situation, needs, and preferences. To enable the LLM-advisor to personalize for the user, we integrate the generated profile from the preference elicitation (Stage 1) $i_u^{LLM}$ into the prompt. We represent each preference as a series of short sentences. + ++Personality: In Section 2.2 we discussed how human financial advisors provide emotional support as well as financial advice. While it is unlikely that an LLM-advisor could do this as well as a human (it lacks both emotional intelligence and non-conversational clues to the customer's mental state [39]) it might be possible to provide a better end-user experience by directing the LLM-advisor to adopt a personality. As noted in Section 2 it is possible to do this via prompt engineering, such as instructing the LLM to take on the traits of one or more of the Big-Five personality types [23]. + +As we are performing a user study with humans, it would be impractical to exhaustively test every combination of personality types, hence as an initial investigation we experiment with two distinct personality profiles [32]: + +- Extroverted: High in extroversion, agreeableness, and openness; low in conscientiousness and neuroticism. +- Conscientious: Low in extroversion, agreeableness, and openness; high in conscientiousness and neuroticism. 
+ +We adopted the prompting method from Jiang et al. (2024) to assign a Big Five personality trait to the LLM agent [14], choosing it for its simplicity and effectiveness among various proposed approaches for embedding personality in LLMs (including both prompting and fine-tuning) [13, 14, 31]. To ensure a high standard of professionalism and accurate representation of the intended personality, we consulted financial professionals to review the texts generated by LLMs adopting both personas. + +# 3.4 Experimental Design + +In our experiment, we conducted two studies: a personalization study (for RQ2) and an advisor persona study (for RQ3). In the personalization study, participants compared a non-personalized (Baseline) advisor with a personalized (+Personalized) version. In the advisor persona study, they compared different LLM-advisor personality types (+Extroverted vs. +Conscientious). Participants are randomly assigned to one of these two studies. + +Figure 3 shows the structure of our user study for a single participant, comprising seven steps: + +(1) Participant Training: Participants are given a general overview of the user study and given instructions on their expected roles during preference elicitation, advisory discussions, asset ranking, and advisor assessment. +(2) Investor Profile Allocation: The user $u$ is randomly allocated one of the investor profiles (See Section 3.1) that they will follow. Each profile is assigned to 42 participants. +(3) Preference Elicitation (Stage 1): The participant interacts with the LLM-advisor as if they were a new investor. The conversation ends once the LLM-advisor determines that they know enough about the investor to personalize for them. The median time spent on preference elicitation was 5 minutes and 11 seconds. +(4) Response Summarization: Given the aggregator of user responses $R_{i}^{u}$ , we instruct an LLM to generate an investor profile $i_{u}^{LLM}$ . 
For each investor preference in $i^{pref}$ , if there is any relevant information in the responses $R_{i}^{u}$ , that information is included in $i_{u}^{LLM}$ . Otherwise, $i_{u}^{LLM}$ indicates that no relevant information is available for that specific preference. +(5) Advisory Discussion (Stage 2): To simplify the conversation flow we have the participant hold separate conversations with the LLM-advisor for each asset they might invest in. The LLM-advisor is provided with context about the current asset (see Section 3.3), and depending on the experimental scenario, optionally personalization information (step 4 output) and/or a target personality context statement. Each conversation continues until the user is satisfied that they have enough information to rate the asset. The order in which the assets are discussed is randomly assigned to avoid position bias. +(6) Asset Ranking and Feedback: Participants rank all the stocks (four in total) discussed in the advisory session according to their desire to invest in each. They also assess the advisor they interacted with using a 7-point Likert scale for the items listed in Table 1 (see Section 4). + +To enable more effective pair-wise comparison of LLM-advisor variants, we have each participant test two variants per study. If the user has only tested one variant at this point, then they repeat the user study (starting at step 2) with the second variant. The order in which participants experience each variant is randomly assigned. + +Table 1: Operational definitions used in the advisor assessment questionnaire for all response dimensions. + +
Response DimensionOperational Definition
Perceived Personalization [16]The advisor understands my needs.
Emotional Trust [16]I feel content about relying on this advisor for my decisions.
Trust in Competence [16]The advisor has good knowledge of the stock.
Intention to Use [16]I am willing to use this advisor as an aid to help with my decision about which stock to purchase.
Perceived Usefulness [25]The advisor gave me good suggestions.
Overall Satisfaction [25]Overall, I am satisfied with the advisor.
Information Provision [38]The advisor provides the financial knowledge needed.
+ +In our experiments, we use Llama-3.1 8B as the background model for all our LLM-advisor variants. $^4$ + +# 3.5 Participants + +We recruited 64 participants from the authors' affiliated university for our study: 32 participants for the personalization study and 32 participants for the advisor persona study, utilizing the university's online platform and blackboard for recruitment. Participants were required to be fluent in English, over 18 years old, and have an interest in finance and investment, mirroring the target demographic of our system's users. After excluding invalid data, 29 participants remained in the personalization study and 31 in the advisor persona study. We conducted a power analysis using the Wilcoxon signed-rank test for matched pairs, with the experimental conditions as the independent variable and users' response to the advisor assessment questionnaire as the dependent variable [29]. The analysis determined that 29 participants are needed to observe a statistically significant effect on user-perceived quality. Our recruitment criteria and compensation (£10/hour) for approximately one hour of participation were approved by our organization's ethical board. + +# 4 Evaluation Metrics and Statistics + +In this section we discuss how we quantify effectiveness for the preference elicitation and advisory discussion stages, respectively, in addition to summarizing dataset statistics for each. + +# 4.1 Preference Elicitation Metrics (Stage 1) + +To evaluate the quality of the first preference elicitation stage, we want to measure how well the LLM-advisor has captured the investor preferences as defined in the investor profile $i$ (see Section 3.1). Each investor profile $i \in I$ defines key features of the investor, such as preferring high-growth stocks, or favoring regular payouts, denoted $i^{pref}$ . 
We have three investor profiles ( $|I| = 3$ ), with 10 ($n$) participants performing elicitation on $i_u^{LLM}$ for each profile and each LLM variant, i.e. there are 120 elicitation attempts in total, with 30 attempts per LLM-advisor variant. Following the notation in Section 3, $i_u^{LLM}$ in this case denotes a similar list of features to $i^{pref}$ that LLM-advisor learned about the investor during conversation with a participant $u$ , which we derive from a manual analysis of the elicitation output (i.e. what is produced by response summarization). Intuitively, the closer the features produced from + +Table 2: General statistics of the collected conversation data. + +
Participants60
Time Period2024/10/24 ~ 2024/11/7
Total Turns10,008
Stage 1: Preference Elicitation
Total Turns1,788
Number of Sessions120
Avg. Turns/Session15.8
Avg. User Words/Turn9.8
Stage 2: Advisory Discussion
Total Turns8,220
Number of Sessions480
Avg. Turns/Session18.2
Avg. User Words/Turn13.0
+ +any elicitation attempt $i_{u}^{LLM}$ is to $i^{pref}$ , the better the LLM-advisor is performing. To this end, we report elicitation accuracy for each investor profile, calculated as: + +$$ +\text {E l i c i t a t i o n A c c u r a c y} (i) = \frac {1}{n} \sum_ {j = 1} ^ {n} \frac {\left| i _ {j} ^ {L L M} \cap i ^ {p r e f} \right|}{\left| i ^ {p r e f} \right|} \tag {1} +$$ + +Human Advisor: To provide a point of comparison, we also conduct a preference elicitation with a financial expert using the same prompt and instructions as the LLM. This allows us to evaluate how close LLMs are to a paid human advisor undertaking the same task. More specifically, for each investor profile, three participants engaged with this expert, who then produced a set of preferences $i_u^{Expert}$ , which can be used instead of $i_u^{LLM}$ in Equation 1. + +# 4.2 Advisory Effectiveness Metrics (Stage 2) + +Ranking correlation (Spearman's Rho): In the second stage, we evaluate how well the LLM-advisor can support an investor to select financial assets that are suitable for them to invest in. Recall from Figure 3 that after a participant finishes discussing all assets with the LLM-advisor, they rank those assets $a \in A_i$ based on the likelihood they will invest in each, i.e. each participant $u$ acting on a profile $i$ we have an asset ranking $R(A_i, i_u)$ . As illustrated in Figure 2, each investor profile $i$ was derived from a ground truth set of investor preferences $i^{pref}$ , which an expert used to create a ground truth ranking $R(A_i, i^{pref})$ , i.e. the "correct" ranking of assets. Intuitively the closer the $R(A_i, i_u)$ is to $R(A_i, i^{pref})$ , the better the advisor is performing, as the participant was better able to distinguish suitable assets vs. unsuitable ones. 
Hence, to evaluate the effectiveness of the advisory task, we report the mean ranking correlation (Spearman's Rho) between $R(A_i, i_u)$ and $R(A_i, i^{pref})$ across participants $u$ for each LLM-advisor. + +Advisor Assessment Questionnaire: Lastly, we also gather qualitative data from each participant via a questionnaire. In particular, after ranking assets, each participant reports how they feel the LLM-advisor performed in terms of 7 dimensions, listed in Table 1, such as perceived usefulness, trust, and user satisfaction. We use this data later to evaluate how sensitive the user is to differences in the LLM-advisor. + +Table 3: Stage 1 - Comparison of Elicitation Accuracy of an expert vs. different LLM-advisors for each investor profile. The best advisor is highlighted in bold. Arrows denote percentage increases $(\uparrow)$ or decreases $(\downarrow)$ compared to the expert. + +
Investor ProfileExpertLLM-Advisors
LLM+Extr.+Cons.Average
Growth-Oriented0.780.760.800.790.78→0.0%
Conservative-Income0.890.820.750.870.82↓7.8%
Risk-Taking0.890.480.600.550.53↓40.5%
Average0.850.690.700.730.70↓17.6%
+ +# 4.3 Dataset Statistics + +Table 2 summarizes the statistics of the data collected during the two stages of our user study. Each conversation that a participant had with an LLM-advisor in either stage 1 or 2 is referred to as a session, e.g. during Stage 1, there were 3 investor profiles * 10 participants * 4 LLM-advisors, resulting in 120 sessions. Stage 2 has 4x the number of sessions, as there are four assets associated with each profile ( $A_i$ ) to discuss with the LLM-advisor. + +From Table 2 we observe that in contrast to other conversational tasks [36, 37], financial information-seeking appears to require more extended interactions. On average, preference elicitation involves 15 turns per session with 9.8 words per turn, whereas advisory discussions involve 18 turns per session with 13.0 words per turn, highlighting the overall complexity of the task. + +# 5 Results + +In this work, we explore how to design conversational financial advisors that enhance both decision-making and positive experience. To achieve this, our user study is guided by 3 core research questions. + +- RQ1: Can LLM-advisors effectively elicit user preferences through conversation? +- RQ2: Does personalization lead to better decisions and more positive advisor assessment? +- RQ3: Do different personality traits affect decision quality and advisor assessment? + +# 5.1 RQ1: Elicitation accuracy + +We begin by examining how effective the LLM-advisors are at identifying investment preferences during conversations in Stage 1. Elicitation Accuracy is the primary metric, where we contrast the mean accuracy across 10 sessions in comparison to a human expert tackling the same task (see Section 4.1). Table 3 reports elicitation accuracy for each LLM-advisor and the Human Expert across investment profiles. Arrows denote percentage increases $(\uparrow)$ or decreases $(\downarrow)$ of the LLM-advisor compared to the expert. 
+ +To set expectations, we first consider the performance of the expert in the first column in Table 3, as we might expect, the expert maintains consistently high performance across all profiles, averaging $85\%$ accuracy (random accuracy is $50\%$ ). This forms an expectation of the performance ceiling for the task. + +Next, we compare the expert performance to each LLM-advisor. From the perspective of preference elicitation, there are three LLM-advisor configurations, those that use only the Baseline Prompt (denoted LLM) from the personalization study, and those that include + +a defined personality (either extroverted, $+\mathrm{Extr}$ , or conscientious, $+\mathrm{Cons}$ .) from the advisor persona study. From Table 3, we observe that the LLM-advisor's performance is generally strong for growth-oriented, and conservative-income investors (with accuracy around $80\%$ ) on average, which is similar to the human advisor. However, for the risk-taking investor profile, the LLM-advisor's elicitation accuracy was substantially lower $(-40.5\%)$ . + +From a manual failure analysis, we observed the following trends that contribute to the performance gap with the human advisor, particularly for the risk-taking profile. First, it is notable that elicitation failures can originate from the investor (participant) rather than the LLM. Recall that one of the aspects that makes finance more challenging than domains like movie recommendation is that the "user" is inexpert, and so may give incorrect information during the conversation. Indeed, we observed cases where the participant confused concepts such as the difference between a growth and a value stock, as well as cyclical/non-cyclical assets. On the other side, preference hallucination is a core issue for the LLM-advisor. The LLM is a probabilistic token generator conditioned on the baseline prompt and prior conversation, and as a result, in some scenarios, the contextual content can override a statement by the investor. 
This type of error is more likely when the investor is unsure in their responses or when they provide contradictory statements. For instance, an investor might express an interest in the consumer discretionary sector while simultaneously opting for non-cyclical stocks, despite consumer discretionary being inherently cyclical. + +To answer RQ1, our results demonstrate that LLM-advisors are able to elicit preferences from a user via conversation and that for two-thirds of the user profiles tested, elicitation accuracy was consistently equivalent or close to that of an expert human advisor. However, we observed a clear failure mode when testing the risk-taking profile, where misunderstandings by the investors and hallucinations within the LLM compound to result in accuracy that is close to random. Overall, we consider this a promising result, as the majority of the time it is effective, and the failure mode observed might be rectified by better context crafting and the addition of contradiction detection; both directions for future research. + +# 5.2 RQ2: Effectiveness of personalization + +Having shown that automatic preference elicitation is possible, we now examine stage 2 of our study, namely the advisory discussions. Given the inherently personalized nature of financial advice, we expect that the customer preferences obtained during stage 1 will be key to enabling LLM-advisors to provide effective investment advice. Hence, in this section, we compare the performance of an LLM-advisor using only the Baseline Prompt to one that includes the preferences obtained during stage 1 (+Personalized). However, as we observed that preference elicitation is not always successful, we also examine what effect elicitation performance has on the LLM-advisor. + +5.2.1 Non-personalized Decision-making Effectiveness: We initially establish how effective the LLM-advisor is without any information regarding the investor. 
LLM-advisor effectiveness is measured + +Table 4: Investor decision-making effectiveness, expressed as the Spearman's Rho correlation between the investor's asset ranking and the expert asset ranking (higher is better). $\dagger$ indicates statistical improvements (Welch's t-test with $p<0.05$ ) over the not personalized baseline, while $\S$ indicates significant differences between cases with successful and unsuccessful preference elicitations. + +
Advisor ConfigInvestor vs. Expert (Spearman's Rho)
PersonalizationPersonalityAllPreference Elicitation
SuccessfulUnsuccessful
BaselineNone0.110--
+PersonalizedNone0.3100.481†§-0.228
+Personalized+Extroverted0.1220.243§-0.286
+Personalized+Conscientious0.260.365-0.025
+ +based on how well the investor was able to rank the assets discussed by suitability to them. The primary metric is average Spearman's Rho correlation between the investor ranking and the ground truth ranking (see Section 4.2), reported in Table 4 row 1. As we expect, baseline advisory performance is low, with only a very weak positive correlation to the ground truth ranking of 0.11. This indicates that without further evidence, the LLM is not able to meaningfully guide the investor. + +5.2.2 Personalized Decision-making Effectiveness: Having established our baseline, we now examine the impact that adding the investor preferences collected during stage 1 has, comparing Table 4 row 1 (baseline) to row 2 (personalized). As we anticipated, personalization is beneficial, with investor decision-making effectiveness increasing from 0.11 to 0.31 (average Spearman's Rho correlation to the expert ranking). However, this correlation is still weak, illustrating that while discussing assets with the LLM-advisor is better than no help at all, our participants are still struggling to evaluate the suitability of financial assets. + +This correlation is an average over all the participants in the user study, regardless of how effective their preference elicitation was in stage 1. Hence, we might ask whether the low correlation is due to the LLM-advisor being confused by poor preference elicitation data. To explore this, Table 4 also reports investor decision-making effectiveness stratified based on whether stage 1 was successful (column 4) or not (column 5). As expected, we see a statistically significant increase in investor decision-making effectiveness when preference elicitation was successful when compared to non-personalized sessions (0.481 vs. 0.110). 
More concerningly, we also see the LLM-advisor has a strong negative influence on the investors' decision-making capability if preference elicitation fails, as illustrated by the negative correlations with the expert in column 5. This result highlights both that effective preference elicitation is crucial, but also that the LLM-advisor can easily influence the investor into making poor decisions, as the human is heavily reliant on the agent to navigate the relatively unfamiliar financial information space. + +5.2.3 Participant Assessment of the Advisor: So far we have demonstrated that there is a large difference between a non-personalized LLM-advisor and a personalized one, in terms of how they can + +Table 5: Average participant users' response to advisor assessment questionnaire under different advisor conditions. Columns labeled with advisor condition (Baseline, +Pers., +Cons., +Extr.) contain a 7-point Likert scale (higher is better). "p" column contains Wilcoxon signed-rank test $p$ -values for (RQ2) Baseline vs. +Personalized (Pers.), and (RQ3) +Conscientious (Cons.) vs. +Extroverted (Extr), for both the full data (All) and the subset where the elicitation accuracy is above 0.5. "Successful Elicitation" refers to the subset where elicitation accuracy was ≥ 0.5. For RQ2, this subset consists of pairs for which +Pers elicitation is successful, while for RQ3, it consists of pairs for which both +Extr and +Cons elicitation are successful. Boldface indicates significant effects with † for $p < 0.1$ and ‡ for $p < 0.05$ . + +
Response Dimension(RQ2) Baseline vs. +Personalized(RQ3) +Conscientious vs. +Extroverted
AllSuccessful ElicitationAllSuccessful Elicitation
Baseline+Pers.pBaseline+Pers.p+Cons.+Extr.p+Cons.+Extr.p
Perceived Personalization5.7595.7240.8385.7625.9050.7515.5005.5000.6635.5885.7060.941
Emotional Trust5.1035.2410.4465.1435.3330.5375.0385.1540.6004.7065.2350.034‡
Trust in Competence5.6905.6900.8175.8105.8570.7825.9626.0770.5386.0006.0001.000
Intention to Use5.3105.4830.5055.4295.7140.1664.8855.4620.005‡4.9415.5880.013‡
Perceived Usefulness5.2415.5170.1835.3815.8100.1945.4235.5380.4255.1765.1180.968
Overall Satisfaction5.3455.6900.1165.4295.8100.098†5.2695.5770.1795.1185.5290.244
Information Provision5.5175.9660.026‡5.7146.1430.053†5.6925.6540.9535.5885.7650.490
+ +alter the decision-making of the investor/participant. But can the participant tell the differences between them? + +Table 5 reports the aggregation of the qualitative data we collected from each participant after they finished interacting with each LLM-advisor in terms of 7 dimensions, where we start by focusing on the RQ2-All columns, i.e. comparing the baseline and personalized variants. The important observation to note here is that the participant preference scores for both variants are statistically indistinguishable, except under the quality of information provision criteria. This means that our participants cannot tell if the LLM-advisor is personalizing to them, and trust the worse agent just as much as the better one. Furthermore, if we consider the best case scenario where the preference elicitation was successful (RQ2 Successful Elicitation columns) we observe the same pattern, even though the difference between the baseline and the personalized variants in terms of the effect it has on the participant decision-making is more pronounced. This underlines one of the core risks of using LLM-advisors in the financial domain; since our users are inherently inexpert they lack the fundamental skills to judge to what extent the LLM is providing good advice, meaning that there is no safety net if the LLM makes a mistake. + +To answer RQ2, our results show that a personalized LLM-advisor is able to provide useful financial advice when it has accurate information regarding the preferences of the investor. This is demonstrated by better decision-making capability by participants using the personalized advisor in comparison to the non-personalized one. However, we also identified two important challenges to adoption. First, the impact the LLM-advisor has is strongly tied to the quality of the preference elicitation data provided, where poor preference elicitation will cause the agent to actively direct the investor to the wrong assets. 
Second, while the participants were positive regarding the LLM-advisors across all questionnaire criteria, they were not able to consistently tell the difference between good and bad advisors; leading to an increased risk of humans acting on bad advice. + +# 5.3 RQ3: Effectiveness of personalities + +Once we have confirmed the utility of personalization for LLM-advisors, we now study the effect that the personality of the advisor has on users' financial information-seeking. As previous studies have shown [32], chatbot personality can affect the way humans interact with the chatbot, and therefore affect the effectiveness and perception of LLM-advisors. To understand whether personality affects LLM financial advisors, we compare two personalized LLM-advisors on which we have injected a pre-defined personality: an extroverted personality and a conscientious personality. While we could consider the personalized LLM-advisor discussed in Section 5.2 as a third distinct personality (the base LLM personality of the LLM), we shall not compare it with our personality-injected models, because different sets of participants were used in the personalization study and the advisor-persona study. + +5.3.1 Decision-making Effectiveness: We first examine the impact of adding personality to the advisors on the decision-making process, by measuring the capacity of the participants to correctly rank the assets (as previously done in Section 5.2). As a primary metric, we again use the average Spearman's Rho correlation between the investor ranking and the ground truth ranking reported in Table 4 rows 3 (extroverted advisor) and row 4 (conscientious advisor). + +We first observe the results for the full set of participants in the user study. Interestingly, we observe a difference between the two advisors, with the conscientious LLM-advisor providing better guidance than the extroverted one (0.26 vs. 0.122). 
This observation is consistent when we restrict our analysis to those cases where the preference elicitation is successful. While, expectedly, the effectiveness of both advisors improves when the elicitation is successful (0.243 vs. 0.122 in the case of the extroverted advisor and 0.365 vs. 0.26 in the case of the conscientious one), the conscientious advisor has an advantage over the extroverted one (0.365 vs. 0.26). + +These results highlight that providing different personalities to an LLM-advisor can notably impact the capacity of the advisor to provide useful information to the investors. + +5.3.2 Participant Assessment of the Advisor: We have observed so far that the use of different personalities affects the user decision-making process. But how do these personalities affect the perception that users have of the LLM-advisor? We observe this in Table 5, in terms of the seven dimensions captured during the advisor assessment questionnaire. + +We first look at the RQ3-All columns, comparing the two personalities. Notably, for the majority of the dimensions, users barely distinguish between both systems. The only answer where we observe a statistically significant difference is the intention to use the system in the future. Surprisingly, despite providing worse guidance to the investor, participants expressed a higher interest in using the extroverted advisor than the conscientious one. When we limit our study to those participants who experienced a successful preference elicitation in both advisor variants, this issue is stressed, as those users also develop a significantly greater emotional trust with the extroverted advisor. + +These observations are worrisome, as they reveal that the personality of a financial advisor cannot only affect the quality of the advice but also lead the investors to trust more on those systems providing worse advice. 
+ +5.3.3 Differences in language: To further understand how personalities affect financial advisory, we analyze the differences in the linguistic patterns provided by extroverted and conscientious advisors. Analyzing participants' reported overall experience from the exit questionnaires in the advisor persona study, over $20\%$ (7 of 31) described the extroverted advisor as clear, assertive, and cheerful while perceiving the conscientious advisor as straightforward, analytical, yet less confident. Therefore, to quantify the linguistic differences in the advisors, we conduct a financial sentiment analysis of the utterances generated by each advisor. For each utterance, we count the occurrences of positive, negative, and uncertain words from the Loughran and McDonald Financial Sentiment Dictionary [22]. We normalize these counts by the length of the sentences and average the results across all dialogues. + +Figure 4 shows the results, showing the extroverted sentiment scores in blue, and the conscientious scores in orange. For the three sentiment dimensions, differences between advisors are statistically significant (Welch's t-test with $p < 0.01$ ). Figure 4 shows that extroverted advisors tend to use more positive language in their interactions, while conscientious advisors prefer negative and uncertain tones. Through manual analysis of the conversation, we observe that this results in the extroverted advisor focusing on the positive aspects of investments while overlooking serious drawbacks, whereas the conscientious advisor provides a more balanced view of the assets. Because of this, participants guided by conscientious advisors may make more well-informed financial decisions. Meanwhile, the positivity of the extroverted advisor seems more appreciated by the users, which is reflected in higher advisor assessment scores from the post-discussion questionnaire. 
+ +To answer RQ3, our results show that different personalities of a personalized LLM-advisor can affect the utility of the provided advice. This is demonstrated by the better decisions of the study + +![](images/709d4faf5630cdd0daf077acd764d4a59ec9401ffa6ebd54c3c945fdcecc8bbc.jpg) +Figure 4: Average sentiment scores by advisor personality (extroverted in light blue and conscientious in pastel orange) and category (Positive, Negative, and Uncertainty). Error bars indicate the standard deviation. + +participants when using an advisor with a conscientious personality than when using an advisor with an extroverted personality. Moreover, the personality of the advisor affects the perception of humans towards the system, and it has the risk of leading investors to further trust those systems that provide worse advice. + +# 6 Conclusion + +In this paper, we have conducted a lab-based user study to examine how effective large language models are as financial advisors. We focus on three core challenges: preference elicitation, investment personalization, and advisor personality. + +First, our analysis shows that LLMs are effective tools for preference elicitation through conversation. In a majority of cases, they are capable of obtaining investor's preferences with an accuracy close to or equivalent to that of an expert human advisor. However, there are some clear failure cases, as LLMs are vulnerable to contradictory statements and hallucinations, which, in the case of complex investor profiles, can decrease the accuracy of the elicitation to random levels. Although LLMs are promising for elicitation, in a complex domain like finance, investors do not always fully understand their own preferences (or they have difficulties expressing them). Therefore, future work should explore the development of LLM-advisors capable of resolving conflicting user needs. 
+ +Second, personalizing LLMs to provide investment advice can improve the decisions made by the investors, but only when the personalized LLM-advisor receives accurate information about the investor's preferences. If the preference elicitation is not successful, the agent actively directs the investors to the wrong assets on which to invest. This underscores how crucial a good preference elicitation is for providing useful financial advice. + +Finally, our results suggest that investors are not necessarily aware of what constitutes good financial advice, and therefore, are vulnerable to acting on bad advice provided by LLMs. In the comparison between a non-personalized and a personalized LLM-advisor, although the personalized system led to better decisions, participants were unable to distinguish between the systems. More worryingly, when comparing two personalized advisors with extroverted and conscientious personalities, we observed that, even though the extroverted advisor provided lower-quality advice, participants trusted this advisor more than the conscientious one. + +Our findings highlight that, while personalized LLM-advisors represent a promising research direction, their use in high-stakes domains like finance is not free of risks: due to the limitations of LLMs at capturing complex investment preferences, and the difficulty of investors to discern whether the advice they receive truly serves their interests, LLMs have a notable risk to drive investors to bad financial assets (leading not only to a low satisfaction but also to potentially large monetary losses). However, these drawbacks open interesting research directions not only from a system perspective, but also from a human-centered approach: automated advisory development where we do not just focus on improving the quality of automated systems to guide investors, but also on how the investors will adopt, trust and interact with these AI agents [6, 20]. + +# References + +[1] James E. 
Allen, Curry I. Guinn, and Eric Horvitz. 1999. Mixed-initiative interaction. IEEE Intelligent Systems and their Applications 14, 5 (1999), 14-23. +[2] Ashay Argal, Siddharth Gupta, Ajay Modi, Pratik Pandey, Simon Shim, and Chang Choo. 2018. Intelligent travel chatbot for predictive recommendation in echo platform. In 2018 IEEE 8th Annual Computing and Communication Workshop and Conference (CCWC 2018). IEEE, 176-183. +[3] Andreas Bucher, Mateusz Dolata, Sven Eckhardt, Dario Staehelin, and Gerhard Schwabe. 2024. Talking to Multi-Party Conversational Agents in Advisory Services: Command-based vs. Conversational Interactions. Proceedings of the ACM on Human-Computer Interaction 8, GROUP (2024). +[4] Wanling Cai, Yucheng Jin, and Li Chen. 2022. Impacts of personal characteristics on user trust in conversational recommender systems. In Proceedings of the 2022 CHI Conference on Human Factors in Computing Systems (CHI 2022). Article 489, 14 pages. +[5] Gary Charness, Uri Gneezy, and Alex Imas. 2013. Experimental methods: Eliciting risk preferences. Journal of Economic Behavior & Organization 87 (2013), 43-51. +[6] Erin K. Chiou and John D. Lee. 2023. Trusting automation: Designing for responsivity and resilience. Human factors 65, 1 (2023), 137-165. +[7] Konstantina Christakopoulou, Filip Radlinski, and Katja Hofmann. 2016. Towards conversational recommender systems. In Proceedings of the 22nd ACM SIGKDD international conference on knowledge discovery and data mining (KDD 2016). 815-824. +[8] Berardina De Carolis, Marco de Gemmis, Pasquale Lops, and Giuseppe Palestra. 2017. Recognizing users feedback from non-verbal communicative acts in conversational recommender systems. Pattern Recognition Letters 99 (2017), 87-95. +[9] Mateusz Dolata, Doris Agotai, Simon Schubiger, and Gerhard Schwabe. 2019. Pen-and-paper Rituals in Service Interaction: Combining High-touch and High-tech in Financial Advisory Encounters. 
Proceedings of the ACM on Human-Computer Interaction 3, CSCW, Article 224 (2019). +[10] Eugene F Fama and Kenneth R French. 1998. Value versus growth: The international evidence. The journal of finance 53, 6 (1998), 1975-1999. +[11] Christian Hildebrand and Anouk Bergner. 2021. Conversational robo advisors as surrogates of trust: onboarding experience, firm perception, and consumer financial decision making. Journal of the Academy of Marketing Science 49, 4 (2021), 659-676. +[12] Dietmar Jannach, Ahtsham Manzoor, Wanling Cai, and Li Chen. 2021. A survey on conversational recommender systems. Comput. Surveys 54, 5 (2021), 1-36. +[13] Guangyuan Jiang, Manjie Xu, Song-Chun Zhu, Wenjuan Han, Chi Zhang, and Yixin Zhu. 2024. Evaluating and inducing personality in pre-trained language models. In Proceedings of the 37th Conference on Neural Information Processing Systems (NeurIPS 2023). +[14] Hang Jiang, Xiajie Zhang, Xubo Cao, Cynthia Breazeal, Deb Roy, and Jad Kabbara. 2024. PersonalLLM: Investigating the Ability of Large Language Models to Express Personality Traits. In Findings of the Association for Computational Linguistics: NAACL 2024. 3605-3627. +[15] Francis M. Kinniry Jr., Colleen M. Jaconetti, Michael A. DijJoseph, Yan Zilbering, Donald G. Bennyhoff, and Georgina Yarwood. 2020. Putting a value on your value: Quantifying Vanguard Adviser's Alpha in the UK. Technical Report. The Vanguard Group, Valley Forge, Pennsylvania, USA. +[16] Sherrie Y.X. Komiak and Izak Benbasat. 2006. The effects of personalization and familiarity on trust and adoption of recommendation agents. MIS quarterly (2006), 941-960. +[17] Ivica Kostric, Krisztian Balog, and Filip Radlinski. 2021. Soliciting user preferences in conversational recommender systems via usage-related questions. In Proceedings of the 15th ACM Conference on Recommender Systems. 724-729. + +[18] Kausik Lakkaraju, Sara E. Jones, Sai Krishna Revanth Vuruma, Vishal Pallagani, Bharath C. Muppasani, and Biplav Srivastava. 
2023. LLMs for Financial Advise-ment: A Fairness and Efficacy Study in Personal Decision Making. In Proceedings of the 4th ACM Conference on AI in Finance (ICAIF 2023). 100-107. +[19] Cong Li. 2016. When does web-based personalization really work? The distinction between actual personalization and perceived personalization. Computers in human behavior 54 (2016), 25-33. +[20] Zhuoyan Li, Zhuoran Lu, and Ming Yin. 2023. Modeling human trust and reliance in AI-assisted decision making: a markovian approach. In Proceedings of the 37th AAAI Conference on Artificial Intelligence (AAAI 2023/IAAI 2023/EAAI 2023). Article 679. +[21] Andrew W. Lo and Jillian Ross. 2024. Can ChatGPT Plan Your Retirement?: Generative AI and Financial Advice. Harvard Data Science Review (2024). Issue Special Issue 5. +[22] Tim Loughran and Bill McDonald. 2011. When is a liability not a liability? Textual analysis, dictionaries, and 10-Ks. The Journal of finance 66, 1 (2011), 35-65. +[23] Robert R. McCrae and Oliver P. John. 1992. An introduction to the five-factor model and its applications. Journal of personality 60 2 (1992), 175-215. +[24] Sourav Medya, Mohammad Rasoolinejad, Yang Yang, and Brian Uzzi. 2022. An Exploratory Study of Stock Price Movements from Earnings Calls. In Companion Proceedings of the Web Conference 2022 (WWW 2022). Association for Computing Machinery, 20-31. +[25] Pearl Pu, Li Chen, and Rong Hu. 2011. A user-centric evaluation framework for recommender systems. In Proceedings of the 5th ACM conference on Recommender Systems (RecSys 2011). 157-164. +[26] Filip Radlinski, Krisztian Balog, Bill Byrne, and Karthik Krishnamoorthi. 2019. Coached conversational preference elicitation: A case study in understanding movie preferences. In Proceedings of the 20th Annual SIGdial Meeting on Discourse and Dialogue (SIGDIAL 2019). 353-360. +[27] Filip Radlinski and Nick Craswell. 2017. A theoretical framework for conversational search. 
In Proceedings of the 2nd Conference on Human Information Interaction and Retrieval (CHIIR 2017). 117-126. +[28] Oscar Sainz, Jon Campos, Iker Garcia-Ferrero, Julien Etxaniz, Oier Lopez de Lacalle, and Eneko Agirre. 2023. NLP Evaluation in trouble: On the Need to Measure LLM Data Contamination for each Benchmark. In Findings of the Association for Computational Linguistics: EMNLP 2023, Houda Bouamor, Juan Pino, and Kalika Bali (Eds.). Association for Computational Linguistics, 10776-10787. +[29] Tetsuya Sakai. 2018. Laboratory experiments in information retrieval. The information retrieval series 40 (2018), 4. +[30] Javier Sanz-Cruzado, Edward Richards, and Richard McCreadie. 2024. FAR-AI: A Modular Platform for Investment Recommendation in the Financial Domain. In Proceedings of the 46th European Conference on Information Retrieval (ECIR 2024), Part V. Springer-Verlag, Glasgow, United Kingdom, 267-271. +[31] Yunfan Shao, Linyang Li, Junqi Dai, and Xipeng Qiu. 2023. Character-LLM: A Trainable Agent for Role-Playing. In Proceedings of the 2023 Conference on Empirical Methods in Natural Language Processing (EMNLP 2023). Association for Computational Linguistics, 13153-13187. +[32] Tuva Lunde Smestad and Frode Volden. 2019. Chatbot personalities matters: improving the user experience of chatbot interfaces. In 5th International Conference Internet Science: (INSCI 2018). Springer, 170-181. +[33] David J Streich. 2023. Risk preference elicitation and financial advice taking. Journal of Behavioral Finance 24, 3 (2023), 259-275. +[34] Yueming Sun and Yi Zhang. 2018. Conversational recommender system. In Proceedings of the 41th International ACM SIGIR Conference on Research and Development in Information Retrieval (SIGIR 2018), 235-244. +[35] Takehiro Takayanagi, Kiyoshi Izumi, Atsuo Kato, Naoyuki Tsunedomi, and Yukina Abe. 2023. Personalized Stock Recommendation with Investors' Attention and Contextual Information. 
In Proceedings of the 46th International ACM SIGIR Conference on Research and Development in Information Retrieval (SIGIR 2023). Association for Computing Machinery, 3339-3343. +[36] Johanne R. Trippas, Sara Fahad Dawood Al Lawati, Joel Mackenzie, and Luke Gallagher. 2024. What do Users Really Ask Large Language Models? An Initial Log Analysis of Google Bard Interactions in the Wild. In Proceedings of the 47th International ACM SIGIR Conference on Research and Development in Information Retrieval (SIGIR 2024). 2703-2707. +[37] Johanne R. Trippas, Luke Gallagher, and Joel Mackenzie. 2024. Re-evaluating the Command-and-Control Paradigm in Conversational Search Interactions. In Proceedings of the 33rd ACM International Conference on Information and Knowledge Management (CIKM 2024). Association for Computing Machinery, 2260-2270. +[38] Patchara Vanichvasin. 2021. Chatbot Development as a Digital Learning Tool to Increase Students' Research Knowledge. International Education Studies 14, 2 (2021), 44-53. +[39] Xuena Wang, Xueting Li, Zi Yin, Yue Wu, and Jia Liu. 2023. Emotional intelligence of large language models. Journal of Pacific Rim Psychology 17 (2023), 18344909231213958. +[40] Pontus Wärnestäl. 2005. User evaluation of a conversational recommender system. In Proceedings of the 4th Workshop on Knowledge and Reasoning in Practical + +Dialogue Systems. +[41] Hamed Zamani, Johanne R Trippas, Jeff Dalton, Filip Radlinski, et al. 2023. Conversational information seeking. Foundations and Trends in Information Retrieval 17, 3-4 (2023), 244-456. +[42] Markus Zanker, Laurens Rook, and Dietmar Jannach. 2019. Measuring the impact of online personalisation: Past, present and future. International Journal of Human-Computer Studies 131 (2019), 160–168. +[43] Yongfeng Zhang, Xu Chen, Qingyao Ai, Liu Yang, and W Bruce Croft. 2018. Towards conversational search and recommendation: System ask, user respond. 
In Proceedings of the 27th ACM International Conference on Information and Knowledge Management (CIKM 2018). 177-186. + +[44] Huaqin Zhao, Zhengliang Liu, Zihao Wu, Yiwei Li, Tianze Yang, Peng Shu, Shaochen Xu, Haixing Dai, Lin Zhao, Gengchen Mai, et al. 2024. Revolutionizing Finance with LLMs: An Overview of Applications and Insights. arXiv preprint arXiv:2401.11641 (2024). +[45] Dávid Zibriczky. 2016. Recommender systems meet finance: a literature review. In Proceedings of the 2nd International Workshop on Personalization & Recommender Systems in Financial Services (FinRec 2016). 1-10. +[46] Liv Ziegfeld, Daan Di Scala, and Anita HM Cremers. 2025. The effect of preference elicitation methods on the user experience in conversational recommender systems. Computer Speech & Language 89 (2025), 101696. \ No newline at end of file diff --git a/data/2025/2504_05xxx/2504.05862/images/2a53765f62dcad92803105547e09853335cbdae1725255060f964dc9c83649c1.jpg b/data/2025/2504_05xxx/2504.05862/images/2a53765f62dcad92803105547e09853335cbdae1725255060f964dc9c83649c1.jpg new file mode 100644 index 0000000000000000000000000000000000000000..7ceab1af7cc7bb7136cc3e134ea3ca006e6dc0e7 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05862/images/2a53765f62dcad92803105547e09853335cbdae1725255060f964dc9c83649c1.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8be7bbf0b36740551f241178dafadfb7098d604bac412bde78ebb4e2426e2a2e +size 8028 diff --git a/data/2025/2504_05xxx/2504.05862/images/386ffde58c6627098c75cf4e90481d55e10c3880e9b1334aba927ea8a1c86fd4.jpg b/data/2025/2504_05xxx/2504.05862/images/386ffde58c6627098c75cf4e90481d55e10c3880e9b1334aba927ea8a1c86fd4.jpg new file mode 100644 index 0000000000000000000000000000000000000000..2714d2ea1af1e9b4a0d606093a0b37c025bf6792 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05862/images/386ffde58c6627098c75cf4e90481d55e10c3880e9b1334aba927ea8a1c86fd4.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid 
sha256:2736db51d5159ae1eff11ba4b1a08808f91b207eed5d057a961aaad735b99605 +size 67762 diff --git a/data/2025/2504_05xxx/2504.05862/images/4cee9a16e36b20686fedd91c3c5c33e55f7ed2b3b09d0c248c6e09e369f3f9f7.jpg b/data/2025/2504_05xxx/2504.05862/images/4cee9a16e36b20686fedd91c3c5c33e55f7ed2b3b09d0c248c6e09e369f3f9f7.jpg new file mode 100644 index 0000000000000000000000000000000000000000..99413bef9af184f86a8dbbe40c0ace381e79c7eb --- /dev/null +++ b/data/2025/2504_05xxx/2504.05862/images/4cee9a16e36b20686fedd91c3c5c33e55f7ed2b3b09d0c248c6e09e369f3f9f7.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2af6957f953b024803fd23e16499a896455cb176d0c0d4fb53f4b51528aed2ca +size 40819 diff --git a/data/2025/2504_05xxx/2504.05862/images/5e64b4ef44e612c46e88ed22abbd02c291273be03180e6dbc3ff63f2edfb84e5.jpg b/data/2025/2504_05xxx/2504.05862/images/5e64b4ef44e612c46e88ed22abbd02c291273be03180e6dbc3ff63f2edfb84e5.jpg new file mode 100644 index 0000000000000000000000000000000000000000..178963f609cd7f975f938899972106fa78d980ff --- /dev/null +++ b/data/2025/2504_05xxx/2504.05862/images/5e64b4ef44e612c46e88ed22abbd02c291273be03180e6dbc3ff63f2edfb84e5.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f30dae39d8fa00b166c5e9415b11ee829c5ae75088de9b757d9a011943fbafb2 +size 83136 diff --git a/data/2025/2504_05xxx/2504.05862/images/709d4faf5630cdd0daf077acd764d4a59ec9401ffa6ebd54c3c945fdcecc8bbc.jpg b/data/2025/2504_05xxx/2504.05862/images/709d4faf5630cdd0daf077acd764d4a59ec9401ffa6ebd54c3c945fdcecc8bbc.jpg new file mode 100644 index 0000000000000000000000000000000000000000..ec26003cbcbec6c9727f94346b23c59b7b7019a8 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05862/images/709d4faf5630cdd0daf077acd764d4a59ec9401ffa6ebd54c3c945fdcecc8bbc.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:3bf0552dbd118f7c50b3771c03be87522bca2336c4bd738a2f1ceade13110d99 +size 21588 diff --git 
a/data/2025/2504_05xxx/2504.05862/images/8d963780ab0d5919fae3ad330b87890337b19b348fbed38c53dc36c9ddfcde88.jpg b/data/2025/2504_05xxx/2504.05862/images/8d963780ab0d5919fae3ad330b87890337b19b348fbed38c53dc36c9ddfcde88.jpg new file mode 100644 index 0000000000000000000000000000000000000000..75a6378959d064f63ea24bcf00c324de7eb76b65 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05862/images/8d963780ab0d5919fae3ad330b87890337b19b348fbed38c53dc36c9ddfcde88.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:32157136bee8937181d1659e2cd8acc1778a310211549d3a310bb3ba5ca9cd04 +size 47344 diff --git a/data/2025/2504_05xxx/2504.05862/images/a6b48ac0208aa2ebad6a9884585dd0814d5926805bfbac9e8cc672c73a856a67.jpg b/data/2025/2504_05xxx/2504.05862/images/a6b48ac0208aa2ebad6a9884585dd0814d5926805bfbac9e8cc672c73a856a67.jpg new file mode 100644 index 0000000000000000000000000000000000000000..5b44c1dceef1f344b4182f7ad717dd0949e0b068 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05862/images/a6b48ac0208aa2ebad6a9884585dd0814d5926805bfbac9e8cc672c73a856a67.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:34cf9717506354d4fe2fc4814f752f3dac623a7f606a46c5eaa62aff76c4501a +size 34878 diff --git a/data/2025/2504_05xxx/2504.05862/images/c4b270fbbcb62fddb142afbc026f33c4b10b865d46fa8bd61bc31254260efa7f.jpg b/data/2025/2504_05xxx/2504.05862/images/c4b270fbbcb62fddb142afbc026f33c4b10b865d46fa8bd61bc31254260efa7f.jpg new file mode 100644 index 0000000000000000000000000000000000000000..a585418e5debf084019aa2efd32938b8a7fbec04 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05862/images/c4b270fbbcb62fddb142afbc026f33c4b10b865d46fa8bd61bc31254260efa7f.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ef9af66eb45020c01f4533ddf7e4471a0efc87b4e9306c0d981310198e6d5ee8 +size 28909 diff --git a/data/2025/2504_05xxx/2504.05862/images/d0bf440a8e47e435aae0fc2e57b40bd06a3e25f2e13d643069bc8f0b44f7c2b6.jpg 
b/data/2025/2504_05xxx/2504.05862/images/d0bf440a8e47e435aae0fc2e57b40bd06a3e25f2e13d643069bc8f0b44f7c2b6.jpg new file mode 100644 index 0000000000000000000000000000000000000000..08b7b0bcd499198503c4c550822e7755635955da --- /dev/null +++ b/data/2025/2504_05xxx/2504.05862/images/d0bf440a8e47e435aae0fc2e57b40bd06a3e25f2e13d643069bc8f0b44f7c2b6.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4af167173581fb37874c6a82718bf23da672ee2f6f60a59c44960c1722c83a5a +size 27937 diff --git a/data/2025/2504_05xxx/2504.05862/images/fd66e0a7cc774da4caaecdf2f74a7c28e16bf0343f962b2806eee36f86cbf795.jpg b/data/2025/2504_05xxx/2504.05862/images/fd66e0a7cc774da4caaecdf2f74a7c28e16bf0343f962b2806eee36f86cbf795.jpg new file mode 100644 index 0000000000000000000000000000000000000000..356e5fa854427e68fd26cf5b5746c024f0fdf40f --- /dev/null +++ b/data/2025/2504_05xxx/2504.05862/images/fd66e0a7cc774da4caaecdf2f74a7c28e16bf0343f962b2806eee36f86cbf795.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:063359629e18c29470efcc5710a66f48e23d45e9289cd859e12ea6edf0a18ebd +size 77179 diff --git a/data/2025/2504_05xxx/2504.05862/layout.json b/data/2025/2504_05xxx/2504.05862/layout.json new file mode 100644 index 0000000000000000000000000000000000000000..a118053f59ab5ad5badeb40ef4007c53d1b35e63 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05862/layout.json @@ -0,0 +1,10075 @@ +{ + "pdf_info": [ + { + "para_blocks": [ + { + "bbox": [ + 77, + 80, + 532, + 118 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 77, + 80, + 532, + 118 + ], + "spans": [ + { + "bbox": [ + 77, + 80, + 532, + 118 + ], + "type": "text", + "content": "Are Generative AI Agents Effective Personalized Financial Advisors?" 
+ } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 88, + 125, + 194, + 139 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 88, + 125, + 194, + 139 + ], + "spans": [ + { + "bbox": [ + 88, + 125, + 194, + 139 + ], + "type": "text", + "content": "Takehiro Takayanagi" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 73, + 139, + 211, + 150 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 73, + 139, + 211, + 150 + ], + "spans": [ + { + "bbox": [ + 73, + 139, + 211, + 150 + ], + "type": "text", + "content": "takayanagi-takehiro590@g.ecc.u-" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 118, + 151, + 165, + 162 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 118, + 151, + 165, + 162 + ], + "spans": [ + { + "bbox": [ + 118, + 151, + 165, + 162 + ], + "type": "text", + "content": "tokyo.ac.jp" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 91, + 163, + 192, + 174 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 91, + 163, + 192, + 174 + ], + "spans": [ + { + "bbox": [ + 91, + 163, + 192, + 174 + ], + "type": "text", + "content": "The University of Tokyo" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 113, + 175, + 168, + 186 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 175, + 168, + 186 + ], + "spans": [ + { + "bbox": [ + 113, + 175, + 168, + 186 + ], + "type": "text", + "content": "Tokyo, Japan" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 270, + 125, + 341, + 138 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 270, + 125, + 341, + 138 + ], + "spans": [ + { + "bbox": [ + 270, + 125, + 341, + 138 + ], + "type": "text", + "content": "Kiyoshi Izumi" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 252, + 139, + 359, + 150 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 252, + 139, + 359, + 150 + ], + "spans": [ + { + "bbox": [ + 252, + 139, + 359, + 150 + ], + "type": "text", + "content": 
"izumi@sys.t.u-tokyo.ac.jp" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 255, + 151, + 356, + 162 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 255, + 151, + 356, + 162 + ], + "spans": [ + { + "bbox": [ + 255, + 151, + 356, + 162 + ], + "type": "text", + "content": "The University of Tokyo" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 278, + 163, + 333, + 174 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 278, + 163, + 333, + 174 + ], + "spans": [ + { + "bbox": [ + 278, + 163, + 333, + 174 + ], + "type": "text", + "content": "Tokyo, Japan" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 417, + 125, + 520, + 137 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 417, + 125, + 520, + 137 + ], + "spans": [ + { + "bbox": [ + 417, + 125, + 520, + 137 + ], + "type": "text", + "content": "Javier Sanz-Cruzado" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 446, + 139, + 493, + 150 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 446, + 139, + 493, + 150 + ], + "spans": [ + { + "bbox": [ + 446, + 139, + 493, + 150 + ], + "type": "text", + "content": "javier.sanz-" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 411, + 151, + 529, + 162 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 411, + 151, + 529, + 162 + ], + "spans": [ + { + "bbox": [ + 411, + 151, + 529, + 162 + ], + "type": "text", + "content": "cruzadopuig@glasgow.ac.uk" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 424, + 163, + 516, + 174 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 424, + 163, + 516, + 174 + ], + "spans": [ + { + "bbox": [ + 424, + 163, + 516, + 174 + ], + "type": "text", + "content": "University of Glasgow" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 415, + 175, + 524, + 186 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 415, + 175, + 524, + 186 + ], + "spans": [ + { + "bbox": [ + 415, + 175, + 524, + 
186 + ], + "type": "text", + "content": "Glasgow, United Kingdom" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 175, + 195, + 271, + 207 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 175, + 195, + 271, + 207 + ], + "spans": [ + { + "bbox": [ + 175, + 195, + 271, + 207 + ], + "type": "text", + "content": "Richard McCreadie" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 154, + 209, + 293, + 220 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 154, + 209, + 293, + 220 + ], + "spans": [ + { + "bbox": [ + 154, + 209, + 293, + 220 + ], + "type": "text", + "content": "richard.mccreadie@glasgow.ac.uk" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 176, + 221, + 269, + 232 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 176, + 221, + 269, + 232 + ], + "spans": [ + { + "bbox": [ + 176, + 221, + 269, + 232 + ], + "type": "text", + "content": "University of Glasgow" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 168, + 233, + 277, + 244 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 168, + 233, + 277, + 244 + ], + "spans": [ + { + "bbox": [ + 168, + 233, + 277, + 244 + ], + "type": "text", + "content": "Glasgow, United Kingdom" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 51, + 251, + 96, + 262 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 251, + 96, + 262 + ], + "spans": [ + { + "bbox": [ + 51, + 251, + 96, + 262 + ], + "type": "text", + "content": "Abstract" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 50, + 266, + 296, + 530 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 266, + 296, + 530 + ], + "spans": [ + { + "bbox": [ + 50, + 266, + 296, + 530 + ], + "type": "text", + "content": "Large language model-based agents are becoming increasingly popular as a low-cost mechanism to provide personalized, conversational advice, and have demonstrated impressive capabilities in relatively simple 
scenarios, such as movie recommendations. But how do these agents perform in complex high-stakes domains, where domain expertise is essential and mistakes carry substantial risk? This paper investigates the effectiveness of LLM-advisors in the finance domain, focusing on three distinct challenges: (1) eliciting user preferences when users themselves may be unsure of their needs, (2) providing personalized guidance for diverse investment preferences, and (3) leveraging advisor personality to build relationships and foster trust. Via a lab-based user study with 64 participants, we show that LLM-advisors often match human advisor performance when eliciting preferences, although they can struggle to resolve conflicting user needs. When providing personalized advice, the LLM was able to positively influence user behavior, but demonstrated clear failure modes. Our results show that accurate preference elicitation is key, otherwise, the LLM-advisor has little impact, or can even direct the investor toward unsuitable assets. More worryingly, users appear insensitive to the quality of advice being given, or worse these can have an inverse relationship. Indeed, users reported a preference for and increased satisfaction as well as emotional trust with LLMs adopting an extroverted persona, even though those agents provided worse advice." 
+ } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 51, + 539, + 123, + 552 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 539, + 123, + 552 + ], + "spans": [ + { + "bbox": [ + 51, + 539, + 123, + 552 + ], + "type": "text", + "content": "CCS Concepts" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 50, + 554, + 296, + 575 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 554, + 296, + 575 + ], + "spans": [ + { + "bbox": [ + 50, + 554, + 296, + 575 + ], + "type": "text", + "content": "- Information systems " + }, + { + "bbox": [ + 50, + 554, + 296, + 575 + ], + "type": "inline_equation", + "content": "\\rightarrow" + }, + { + "bbox": [ + 50, + 554, + 296, + 575 + ], + "type": "text", + "content": " Decision support systems; Personalization." + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 51, + 585, + 103, + 597 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 585, + 103, + 597 + ], + "spans": [ + { + "bbox": [ + 51, + 585, + 103, + 597 + ], + "type": "text", + "content": "Keywords" + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 50, + 600, + 294, + 611 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 600, + 294, + 611 + ], + "spans": [ + { + "bbox": [ + 50, + 600, + 294, + 611 + ], + "type": "text", + "content": "large language models, financial advisor, user study, generative AI" + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 50, + 617, + 295, + 675 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 617, + 295, + 675 + ], + "spans": [ + { + "bbox": [ + 50, + 617, + 295, + 675 + ], + "type": "text", + "content": "Permission to make digital or hard copies of all or part of this work for personal or classroom use is granted without fee provided that copies are not made or distributed for profit or commercial advantage and that copies bear this notice and the full citation on the first page. 
Copyrights for components of this work owned by others than the author(s) must be honored. Abstracting with credit is permitted. To copy otherwise, or republish, to post on servers or to redistribute to lists, requires prior specific permission and/or a fee. Request permissions from permissions@acm.org." + } + ] + } + ], + "index": 26 + }, + { + "bbox": [ + 52, + 675, + 170, + 684 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 675, + 170, + 684 + ], + "spans": [ + { + "bbox": [ + 52, + 675, + 170, + 684 + ], + "type": "text", + "content": "SIGIR 2025, July 13-18, 2018, Padua, Italy" + } + ] + } + ], + "index": 27 + }, + { + "bbox": [ + 52, + 685, + 289, + 693 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 685, + 289, + 693 + ], + "spans": [ + { + "bbox": [ + 52, + 685, + 289, + 693 + ], + "type": "text", + "content": "© 2025 Copyright held by the owner/author(s). Publication rights licensed to ACM." + } + ] + } + ], + "index": 28 + }, + { + "bbox": [ + 52, + 693, + 165, + 700 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 693, + 165, + 700 + ], + "spans": [ + { + "bbox": [ + 52, + 693, + 165, + 700 + ], + "type": "text", + "content": "ACM ISBN 978-x-xxxxx-xxxxx-x/YY/MM" + } + ] + } + ], + "index": 29 + }, + { + "bbox": [ + 52, + 700, + 174, + 709 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 700, + 174, + 709 + ], + "spans": [ + { + "bbox": [ + 52, + 700, + 174, + 709 + ], + "type": "text", + "content": "https://doi.org/10.1145/nnnnnnn.nnnnnnn" + } + ] + } + ], + "index": 30 + }, + { + "bbox": [ + 358, + 195, + 416, + 207 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 358, + 195, + 416, + 207 + ], + "spans": [ + { + "bbox": [ + 358, + 195, + 416, + 207 + ], + "type": "text", + "content": "Iadh Ounis" + } + ] + } + ], + "index": 31 + }, + { + "bbox": [ + 332, + 209, + 442, + 220 + ], + "type": "text", + "angle": 0, + "lines": [ + { + 
"bbox": [ + 332, + 209, + 442, + 220 + ], + "spans": [ + { + "bbox": [ + 332, + 209, + 442, + 220 + ], + "type": "text", + "content": "iadh.ounis@glasgow.ac.uk" + } + ] + } + ], + "index": 32 + }, + { + "bbox": [ + 341, + 220, + 433, + 232 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 341, + 220, + 433, + 232 + ], + "spans": [ + { + "bbox": [ + 341, + 220, + 433, + 232 + ], + "type": "text", + "content": "University of Glasgow" + } + ] + } + ], + "index": 33 + }, + { + "bbox": [ + 333, + 233, + 441, + 244 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 333, + 233, + 441, + 244 + ], + "spans": [ + { + "bbox": [ + 333, + 233, + 441, + 244 + ], + "type": "text", + "content": "Glasgow, United Kingdom" + } + ] + } + ], + "index": 34 + }, + { + "type": "image", + "bbox": [ + 329, + 250, + 544, + 351 + ], + "blocks": [ + { + "bbox": [ + 329, + 250, + 544, + 351 + ], + "lines": [ + { + "bbox": [ + 329, + 250, + 544, + 351 + ], + "spans": [ + { + "bbox": [ + 329, + 250, + 544, + 351 + ], + "type": "image", + "image_path": "4cee9a16e36b20686fedd91c3c5c33e55f7ed2b3b09d0c248c6e09e369f3f9f7.jpg" + } + ] + } + ], + "index": 35, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 314, + 363, + 559, + 386 + ], + "lines": [ + { + "bbox": [ + 314, + 363, + 559, + 386 + ], + "spans": [ + { + "bbox": [ + 314, + 363, + 559, + 386 + ], + "type": "text", + "content": "Figure 1: Conceptual illustration of an LLM-advisor with two stages: (1) Preference Elicitation and (2) Advisory Discussion." 
+ } + ] + } + ], + "index": 36, + "angle": 0, + "type": "image_caption" + } + ], + "index": 35 + }, + { + "bbox": [ + 315, + 397, + 405, + 407 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 397, + 405, + 407 + ], + "spans": [ + { + "bbox": [ + 315, + 397, + 405, + 407 + ], + "type": "text", + "content": "ACM Reference Format:" + } + ] + } + ], + "index": 37 + }, + { + "bbox": [ + 314, + 407, + 560, + 448 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 407, + 560, + 448 + ], + "spans": [ + { + "bbox": [ + 314, + 407, + 560, + 448 + ], + "type": "text", + "content": "Takehiro Takayanagi, Kiyoshi Izumi, Javier Sanz-Cruzado, Richard McCreadie, and Iadh Ounis. 2025. Are Generative AI Agents Effective Personalized Financial Advisors?. In Proceedings of SIGIR 2025. ACM, New York, NY, USA, 11 pages. https://doi.org/10.1145/nnnnnnn.nnnnnnn" + } + ] + } + ], + "index": 38 + }, + { + "bbox": [ + 315, + 464, + 398, + 475 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 464, + 398, + 475 + ], + "spans": [ + { + "bbox": [ + 315, + 464, + 398, + 475 + ], + "type": "text", + "content": "1 Introduction" + } + ] + } + ], + "index": 39 + }, + { + "bbox": [ + 313, + 479, + 560, + 599 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 479, + 560, + 599 + ], + "spans": [ + { + "bbox": [ + 313, + 479, + 560, + 599 + ], + "type": "text", + "content": "Personalized advice plays a crucial role in our society, particularly in complex and high-stakes domains like healthcare and finance. Advisors and professionals in these fields use their expertise to offer personalized guidance and emotional support to their clients, leveraging people's specific preferences and/or circumstances. However, advisory services are often provided at a high cost, effectively excluding a large portion of the population from this critical advice. 
In the financial domain, to mitigate this issue, automated decision support systems have been widely studied, with a special focus on investment-related predictions, such as financial asset recommendations [30, 35]." + } + ] + } + ], + "index": 40 + }, + { + "bbox": [ + 313, + 600, + 560, + 710 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 600, + 560, + 710 + ], + "spans": [ + { + "bbox": [ + 313, + 600, + 560, + 710 + ], + "type": "text", + "content": "Recent advances in natural language processing and large language models (LLMs) have significantly accelerated the development of conversational agents, presenting the potential to function as personalized assistants for information-seeking and decision-making [41]. These agents can now leverage multi-turn dialogues, enabling dynamic, mixed-initiative interactions where both users and systems can take the lead in conversations [1]. This progression has expanded the application of conversational agents to various tasks, such as recommendation, question answering, and search [12, 27, 34, 41]." 
+ } + ] + } + ], + "index": 41 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 14, + 214, + 37, + 559 + ], + "type": "aside_text", + "angle": 270, + "lines": [ + { + "bbox": [ + 14, + 214, + 37, + 559 + ], + "spans": [ + { + "bbox": [ + 14, + 214, + 37, + 559 + ], + "type": "text", + "content": "arXiv:2504.05862v2 [cs.AI] 15 Apr 2025" + } + ] + } + ], + "index": 0 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 0 + }, + { + "para_blocks": [ + { + "bbox": [ + 50, + 84, + 294, + 227 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 84, + 294, + 227 + ], + "spans": [ + { + "bbox": [ + 50, + 84, + 294, + 227 + ], + "type": "text", + "content": "The application of these conversational agents for financial decision-making represents a much more complex scenario than others like movie recommendations, because users are not necessarily familiar with the basic terminology and concepts in this space, and mistakes carry a substantial risk that can lead to large monetary losses. While there is a growing interest in building these conversational assistants to provide automated financial advice [21], previous work has mostly targeted agents capable of handling simple inquiries [18, 36, 37]. Compared to these simple systems, helping users navigate financial decisions and market uncertainties poses a much greater challenge. Therefore, it is not yet clear how to develop systems that effectively support complex financial information-seeking and decision-making tasks." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 50, + 227, + 294, + 304 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 227, + 294, + 304 + ], + "spans": [ + { + "bbox": [ + 50, + 227, + 294, + 304 + ], + "type": "text", + "content": "This work aims to close this gap by exploring the effectiveness of LLMs to act as personalized financial advisory agents. 
In particular, we focus on three problems: (a) eliciting investor preferences through interactive conversations, (b) providing personalized guidance to help users determine whether particular financial assets align with their preferences, and (c) leveraging the personality of the advisor to foster trust on the advisor." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 50, + 304, + 294, + 380 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 304, + 294, + 380 + ], + "spans": [ + { + "bbox": [ + 50, + 304, + 294, + 380 + ], + "type": "text", + "content": "First, the financial literature emphasizes that eliciting user preferences is central to delivering suitable advice [33]. However, it remains unclear whether current conversational technologies, particularly those powered by LLMs, can correctly elicit user preferences in specialized domains where users struggle to articulate their needs. Our work addresses this challenge in the context of financial services." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 50, + 380, + 294, + 456 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 380, + 294, + 456 + ], + "spans": [ + { + "bbox": [ + 50, + 380, + 294, + 456 + ], + "type": "text", + "content": "Second, although personalization is widely regarded as important in the financial decision-support literature [30, 35], its value in a conversational setting remains uncertain. In particular, we explore whether tailoring dialogue around a user's profile and context improves financial decision-making. Additionally, we also explore how personalization influences user perceptions of the advisor, in terms of aspects like trust and satisfaction." 
+ } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 50, + 457, + 294, + 534 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 457, + 294, + 534 + ], + "spans": [ + { + "bbox": [ + 50, + 457, + 294, + 534 + ], + "type": "text", + "content": "Finally, in personalized advisory settings within high-stakes domains, the relationship and trust between the client and advisor play a crucial role [21]. Research on conversational agents suggests that agent personality significantly affects users' perceptions of the system [4, 32]. However, it remains unclear how an advisor's personality in the financial domain influences both the quality of users' financial decisions and their overall experience." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 61, + 534, + 294, + 545 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 61, + 534, + 294, + 545 + ], + "spans": [ + { + "bbox": [ + 61, + 534, + 294, + 545 + ], + "type": "text", + "content": "To summarize, in this paper, we explore the following questions:" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 67, + 556, + 293, + 619 + ], + "type": "list", + "angle": 0, + "index": 11, + "blocks": [ + { + "bbox": [ + 67, + 556, + 293, + 576 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 556, + 293, + 576 + ], + "spans": [ + { + "bbox": [ + 67, + 556, + 293, + 576 + ], + "type": "text", + "content": "- RQ1: Can LLM-advisors effectively elicit user preferences through conversation?" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 67, + 578, + 293, + 597 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 578, + 293, + 597 + ], + "spans": [ + { + "bbox": [ + 67, + 578, + 293, + 597 + ], + "type": "text", + "content": "- RQ2: Does personalization lead to better investment decisions and a more positive advisor assessment?" 
+ } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 67, + 600, + 293, + 619 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 600, + 293, + 619 + ], + "spans": [ + { + "bbox": [ + 67, + 600, + 293, + 619 + ], + "type": "text", + "content": "- RQ3: Do different personality traits affect decision quality and advisor assessment?" + } + ] + } + ], + "index": 10 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 50, + 633, + 294, + 710 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 633, + 294, + 710 + ], + "spans": [ + { + "bbox": [ + 50, + 633, + 294, + 710 + ], + "type": "text", + "content": "To address these questions, we conduct a lab-based user study that explores the effectiveness of LLMs as interactive conversational financial advisors, on which we simulate realistic investment scenarios using investor narratives and stock relevance scores curated by financial experts. Figure 1 illustrates an example conversation with the advisor, divided into two stages: first, the LLM-advisor attempts to capture the investor preferences through conversation; in" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 313, + 84, + 559, + 150 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 84, + 559, + 150 + ], + "spans": [ + { + "bbox": [ + 313, + 84, + 559, + 150 + ], + "type": "text", + "content": "the second stage, given an individual asset, the advisor provides information about it to the investor, including how the asset matches (or not) the investor's preferences. To answer the different questions, we compare different configurations of the LLM-advisor: first, we compare personalized vs. non-personalized advisors, and, then, we compare two personalized advisors with distinct personalities." 
+ } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 314, + 160, + 403, + 172 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 160, + 403, + 172 + ], + "spans": [ + { + "bbox": [ + 314, + 160, + 403, + 172 + ], + "type": "text", + "content": "2 Related Work" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 314, + 176, + 552, + 188 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 176, + 552, + 188 + ], + "spans": [ + { + "bbox": [ + 314, + 176, + 552, + 188 + ], + "type": "text", + "content": "2.1 Personalization and Preference Elicitation" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 313, + 191, + 559, + 355 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 191, + 559, + 355 + ], + "spans": [ + { + "bbox": [ + 313, + 191, + 559, + 355 + ], + "type": "text", + "content": "Information systems, especially those focused on search and recommendation benefit from personalization [16]. Specifically, personalization techniques play a crucial role in enhancing user experience [19, 25, 42]. Interactive approaches, such as conversational preference elicitation represent the frontier of personalization. This problem has received growing attention, as advances in generative AI now provide a functional mechanism to collect user preferences dynamically in a free-form manner [41]. This interactive approach can capture more diverse and targeted insights than static approaches like questionnaires [7, 12, 26, 27, 34]. Indeed, recent studies have proposed various methods for effective conversational preference elicitation [34, 43], as well as user studies on the perceived quality of this process in domains such as e-commerce, movies, fashion, books, travel, and restaurant recommendations [2, 8, 17, 26, 34, 46]." 
+ } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 313, + 356, + 559, + 542 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 356, + 559, + 542 + ], + "spans": [ + { + "bbox": [ + 313, + 356, + 559, + 542 + ], + "type": "text", + "content": "However, we argue that for some important domains, trying to directly collect preferences is insufficient. An implicit assumption of these studies is that if directly asked, the user will be able to accurately express their preferences. It is reasonable to expect that this assumption would hold for scenarios like movie recommendation; we can ask a user \"do you like horror movies?\" and expect a useful response. On the other hand, this will not hold for complex tasks, where the user lacks the knowledge to form an accurate response [12, 40]. For instance, in an investment context if we asked \"do you prefer ETFs or Bonds?\", it is not clear that an inexperienced user would be able to produce a meaningful answer. In these cases, an ideal agent needs to fill the gaps in the user knowledge through conversation, as well as infer the user preferences across multiple (often uncertain) user responses. But how effective are generative AI agents at this complex task? This paper aims to answer that question for the domain of financial advisory; a particularly challenging domain given its technical nature and high risks if done poorly." 
+ } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 314, + 552, + 434, + 565 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 552, + 434, + 565 + ], + "spans": [ + { + "bbox": [ + 314, + 552, + 434, + 565 + ], + "type": "text", + "content": "2.2 Financial advisory" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 313, + 567, + 559, + 710 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 567, + 559, + 710 + ], + "spans": [ + { + "bbox": [ + 313, + 567, + 559, + 710 + ], + "type": "text", + "content": "In the financial domain, advisors help individuals manage their personal finances by offering guidance on investments and assisting with decision-making. While financial advisors can be beneficial, their services often come at a high cost, making them unaffordable for many people. To mitigate this issue, automated (nonconversational) financial decision support systems such as financial recommender systems have been widely studied [45]. The majority of research in this area has been focused on how to find profitable assets (i.e. those that will make money if we invest in them). These works assume a simplified user-model, where an investor is only concerned with maximizing return-on-investment over a fixed period of time [30, 35]. 
These studies frame financial advisory as a ranking problem, where the goal is to rank financial assets for a user" + } + ] + } + ], + "index": 19 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 52, + 60, + 172, + 69 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 60, + 172, + 69 + ], + "spans": [ + { + "bbox": [ + 52, + 60, + 172, + 69 + ], + "type": "text", + "content": "SIGIR 2025, July 13-18, 2018, Padua, Italy" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 507, + 60, + 558, + 68 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 507, + 60, + 558, + 68 + ], + "spans": [ + { + "bbox": [ + 507, + 60, + 558, + 68 + ], + "type": "text", + "content": "Takayanagi et al." + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 1 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 93, + 82, + 519, + 178 + ], + "blocks": [ + { + "bbox": [ + 93, + 82, + 519, + 178 + ], + "lines": [ + { + "bbox": [ + 93, + 82, + 519, + 178 + ], + "spans": [ + { + "bbox": [ + 93, + 82, + 519, + 178 + ], + "type": "image", + "image_path": "386ffde58c6627098c75cf4e90481d55e10c3880e9b1334aba927ea8a1c86fd4.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 50, + 194, + 559, + 217 + ], + "lines": [ + { + "bbox": [ + 50, + 194, + 559, + 217 + ], + "spans": [ + { + "bbox": [ + 50, + 194, + 559, + 217 + ], + "type": "text", + "content": "Figure 2: Example of an investor profile, investment preferences, and ground truth ranking. Dashed line components are used for evaluation (and therefore, they are not shown to the user/LLM)." 
+ } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_caption" + } + ], + "index": 2 + }, + { + "bbox": [ + 50, + 221, + 294, + 276 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 221, + 294, + 276 + ], + "spans": [ + { + "bbox": [ + 50, + 221, + 294, + 276 + ], + "type": "text", + "content": "over a specified time period. However, a recent study suggests that a large part of the value offered by human financial advisors stems from their ability to personalize investment guidance to clients' specific needs, build relationships, and foster trust [15], rather than simply presenting suitable assets." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 50, + 277, + 295, + 430 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 277, + 295, + 430 + ], + "spans": [ + { + "bbox": [ + 50, + 277, + 295, + 430 + ], + "type": "text", + "content": "Reflecting on these findings, the development of conversational financial advisors has drawn increasing attention, as it enables a dynamic understanding of users' needs, personalized guidance, and the potential to build trustworthy relationships [3, 9, 11, 18, 44]. In particular, the conversational agents' personality has gained attention as a factor that can help build relationships with clients and foster trust [21], especially given the successes of conversational agents using the Big Five personality model [23] to enhance the end-user experience [5, 33]. Although conversational agents show potential in finance, how to configure them to match the value of human advisors remains unclear. Therefore, we conduct a user study to examine how personalizing investment guidance and the advisor's personality shape users' financial decision-making effectiveness and overall user experience." 
+ } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 51, + 443, + 135, + 455 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 443, + 135, + 455 + ], + "spans": [ + { + "bbox": [ + 51, + 443, + 135, + 455 + ], + "type": "text", + "content": "3 Methodology" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 50, + 457, + 295, + 545 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 457, + 295, + 545 + ], + "spans": [ + { + "bbox": [ + 50, + 457, + 295, + 545 + ], + "type": "text", + "content": "In this paper we aim to determine to what extent current generative language models can act as an effective financial advisor. Indeed, given the need to personalize for the user, emotional implications, the technical nature of the information-seeking task, and high impact if failed, we argue that this is an excellent test case for the limits of generative large language models. To structure our evaluation, we divide our study into two phases, as illustrated in Figure 1, where we evaluate the success of both:" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 62, + 551, + 295, + 682 + ], + "type": "list", + "angle": 0, + "index": 10, + "blocks": [ + { + "bbox": [ + 62, + 551, + 295, + 614 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 62, + 551, + 295, + 614 + ], + "spans": [ + { + "bbox": [ + 62, + 551, + 295, + 614 + ], + "type": "text", + "content": "(1) Preference Elicitation: During this stage, we have the LLM-advisor hold a natural language conversation with a human, where it is directed to collect information regarding the person's investment preferences. The human in this interaction is pretending to have preferences from a given investor profile." 
+ } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 62, + 616, + 294, + 682 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 62, + 616, + 294, + 682 + ], + "spans": [ + { + "bbox": [ + 62, + 616, + 294, + 682 + ], + "type": "text", + "content": "(2) Advisory Discussion: During the advisory discussion, the LLM-advisor again has a natural language conversation with the human (acting on an investor profile), where the human collects information about whether a company is a suitable investment for them. This is repeated for multiple companies per investor profile." + } + ] + } + ], + "index": 9 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 50, + 688, + 294, + 709 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 688, + 294, + 709 + ], + "spans": [ + { + "bbox": [ + 50, + 688, + 294, + 709 + ], + "type": "text", + "content": "We provide preparatory information and discuss each stage in more detail below:" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 315, + 220, + 424, + 231 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 220, + 424, + 231 + ], + "spans": [ + { + "bbox": [ + 315, + 220, + 424, + 231 + ], + "type": "text", + "content": "3.1 Investor Profiles" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 313, + 235, + 559, + 344 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 235, + 559, + 344 + ], + "spans": [ + { + "bbox": [ + 313, + 235, + 559, + 344 + ], + "type": "text", + "content": "To fairly evaluate the ability of any LLM-advisor, we need to have them interact with human users with real needs. Given the open-ended nature of free-form conversations, it is desirable to repeat each experiment with different people such that we can observe variances in conversation paths, as those variances may influence task success. However, to enable repeatability, we need to hold the investor needs constant across repetitions. 
Hence, we define three archetypal investor profiles " + }, + { + "bbox": [ + 313, + 235, + 559, + 344 + ], + "type": "inline_equation", + "content": "i \\in I" + }, + { + "bbox": [ + 313, + 235, + 559, + 344 + ], + "type": "text", + "content": " based on input from a financial expert, where our human participants are given one to follow when conversing with the LLM-advisor:" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 331, + 349, + 559, + 448 + ], + "type": "list", + "angle": 0, + "index": 17, + "blocks": [ + { + "bbox": [ + 331, + 349, + 559, + 380 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 331, + 349, + 559, + 380 + ], + "spans": [ + { + "bbox": [ + 331, + 349, + 559, + 380 + ], + "type": "text", + "content": "- Investor 1: Growth-Oriented Healthcare Enthusiast: Prefers healthcare innovations, values high-growth opportunities, and takes measured risks." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 331, + 382, + 558, + 415 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 331, + 382, + 558, + 415 + ], + "spans": [ + { + "bbox": [ + 331, + 382, + 558, + 415 + ], + "type": "text", + "content": "- Investor 2: Conservative Income Seeker: Seeks stable returns, invests in well-established companies, values regular dividend payouts." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 331, + 415, + 559, + 448 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 331, + 415, + 559, + 448 + ], + "spans": [ + { + "bbox": [ + 331, + 415, + 559, + 448 + ], + "type": "text", + "content": "- Investor 3: Risk-taking Value Investor: Targets undervalued companies with strong long-term potential, tolerates short-term volatility, and invests in cyclical sectors." 
+ } + ] + } + ], + "index": 16 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 313, + 452, + 559, + 595 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 452, + 559, + 595 + ], + "spans": [ + { + "bbox": [ + 313, + 452, + 559, + 595 + ], + "type": "text", + "content": "For each of these investor profiles, we select three key investment preferences, chosen from well-known investment characteristics such as industry sector, stock style, consistency in dividend payments, and sensitivity to global market changes [10]. We denote the set of investor preferences as " + }, + { + "bbox": [ + 313, + 452, + 559, + 595 + ], + "type": "inline_equation", + "content": "i^{pref}" + }, + { + "bbox": [ + 313, + 452, + 559, + 595 + ], + "type": "text", + "content": ". In our experiments, we simulate a realistic elicitation scenario where the advisor collects the preferences from the participants. Therefore, we do not straightforwardly provide the preferences to the participants. Instead, we present them as text narratives of between 150 to 200 words. A financial expert was consulted to confirm the quality and reliability of these narratives. An example narrative representing Investor 2 is illustrated in Figure 2, where we highlight the sentences referring to specific investor preferences." 
+ } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 315, + 606, + 491, + 618 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 606, + 491, + 618 + ], + "spans": [ + { + "bbox": [ + 315, + 606, + 491, + 618 + ], + "type": "text", + "content": "3.2 Stage 1: Preference Elicitation" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 313, + 621, + 559, + 710 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 621, + 559, + 710 + ], + "spans": [ + { + "bbox": [ + 313, + 621, + 559, + 710 + ], + "type": "text", + "content": "The goal of stage 1 of our study is to determine to what extent an LLM-advisor can effectively collect a user's investment preferences through conversation. Formally, given a participant of the user study " + }, + { + "bbox": [ + 313, + 621, + 559, + 710 + ], + "type": "inline_equation", + "content": "u" + }, + { + "bbox": [ + 313, + 621, + 559, + 710 + ], + "type": "text", + "content": " and an investor profile " + }, + { + "bbox": [ + 313, + 621, + 559, + 710 + ], + "type": "inline_equation", + "content": "i" + }, + { + "bbox": [ + 313, + 621, + 559, + 710 + ], + "type": "text", + "content": ", during the elicitation stage, the LLM-advisor aims to obtain an approximated set of preferences, denoted " + }, + { + "bbox": [ + 313, + 621, + 559, + 710 + ], + "type": "inline_equation", + "content": "i_u^{LLM}" + }, + { + "bbox": [ + 313, + 621, + 559, + 710 + ], + "type": "text", + "content": ", that matches the investor preferences (" + }, + { + "bbox": [ + 313, + 621, + 559, + 710 + ], + "type": "inline_equation", + "content": "i^{pref}" + }, + { + "bbox": [ + 313, + 621, + 559, + 710 + ], + "type": "text", + "content": "). To achieve this, the generative model produces a series of questions that participants answer by interpreting the investor narrative." 
+ } + ] + } + ], + "index": 20 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 51, + 60, + 248, + 68 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 60, + 248, + 68 + ], + "spans": [ + { + "bbox": [ + 51, + 60, + 248, + 68 + ], + "type": "text", + "content": "Are Generative AI Agents Effective Personalized Financial Advisors?" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 438, + 60, + 559, + 69 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 438, + 60, + 559, + 69 + ], + "spans": [ + { + "bbox": [ + 438, + 60, + 559, + 69 + ], + "type": "text", + "content": "SIGIR 2025, July 13-18, 2018, Padua, Italy" + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 2 + }, + { + "para_blocks": [ + { + "bbox": [ + 50, + 84, + 294, + 120 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 84, + 294, + 120 + ], + "spans": [ + { + "bbox": [ + 50, + 84, + 294, + 120 + ], + "type": "text", + "content": "Responses to those questions, denoted as " + }, + { + "bbox": [ + 50, + 84, + 294, + 120 + ], + "type": "inline_equation", + "content": "R_{i}^{u}" + }, + { + "bbox": [ + 50, + 84, + 294, + 120 + ], + "type": "text", + "content": ", are used by the LLM-advisor to generate the user profile " + }, + { + "bbox": [ + 50, + 84, + 294, + 120 + ], + "type": "inline_equation", + "content": "i_{u}^{LLM}" + }, + { + "bbox": [ + 50, + 84, + 294, + 120 + ], + "type": "text", + "content": ". Success is then measured by manually evaluating the overlap between " + }, + { + "bbox": [ + 50, + 84, + 294, + 120 + ], + "type": "inline_equation", + "content": "i^{pref}" + }, + { + "bbox": [ + 50, + 84, + 294, + 120 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 50, + 84, + 294, + 120 + ], + "type": "inline_equation", + "content": "i_{u}^{LLM}" + }, + { + "bbox": [ + 50, + 84, + 294, + 120 + ], + "type": "text", + "content": "." 
+ } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 50, + 120, + 295, + 219 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 120, + 295, + 219 + ], + "spans": [ + { + "bbox": [ + 50, + 120, + 295, + 219 + ], + "type": "text", + "content": "For user elicitation, we adopted a System-Ask-User-Respond (SAUR) paradigm [43]. During the conversation, the advisor proactively inquires about the user's preferences given a set of target preferences (e.g., industry type, acceptable risk). After the human participant responds to a question, the LLM-advisor checks whether the collected preferences cover all of the target preferences. If the advisor is confident that they do, it ends the conversation and prompts the user to proceed to the next stage; otherwise, it continues asking follow-up questions in a loop." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 51, + 228, + 220, + 240 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 228, + 220, + 240 + ], + "spans": [ + { + "bbox": [ + 51, + 228, + 220, + 240 + ], + "type": "text", + "content": "3.3 Stage 2: Advisory Discussion" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 50, + 242, + 295, + 363 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 242, + 295, + 363 + ], + "spans": [ + { + "bbox": [ + 50, + 242, + 295, + 363 + ], + "type": "text", + "content": "Stage 2 of our study investigates to what extent an LLM-advisor can provide the same benefits as a real human advisor when exploring investment options. Note that the goal here is not to have the LLM-advisor promote any one asset, but rather to provide accurate and meaningful information such that the human can find the best investment opportunity for them. 
To this end, we structure our experiment such that the human (acting on an investor profile) has one conversation with the LLM-advisor for each of a set of assets being considered.1 After all assets are presented to the participant, a stock ranking is generated by sorting the stocks by the participant rating in descending order." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 50, + 363, + 295, + 496 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 363, + 295, + 496 + ], + "spans": [ + { + "bbox": [ + 50, + 363, + 295, + 496 + ], + "type": "text", + "content": "Importantly, as we know the investor profile " + }, + { + "bbox": [ + 50, + 363, + 295, + 496 + ], + "type": "inline_equation", + "content": "i^{pref}" + }, + { + "bbox": [ + 50, + 363, + 295, + 496 + ], + "type": "text", + "content": " for each conversation about an asset " + }, + { + "bbox": [ + 50, + 363, + 295, + 496 + ], + "type": "inline_equation", + "content": "a" + }, + { + "bbox": [ + 50, + 363, + 295, + 496 + ], + "type": "text", + "content": ", we can objectively determine whether " + }, + { + "bbox": [ + 50, + 363, + 295, + 496 + ], + "type": "inline_equation", + "content": "a" + }, + { + "bbox": [ + 50, + 363, + 295, + 496 + ], + "type": "text", + "content": " is a good investment given " + }, + { + "bbox": [ + 50, + 363, + 295, + 496 + ], + "type": "inline_equation", + "content": "i^{pref}" + }, + { + "bbox": [ + 50, + 363, + 295, + 496 + ], + "type": "text", + "content": ", forming a ground truth against which we can compare to the rating provided by our human participant after their conversation with the LLM-advisor. 
For each asset " + }, + { + "bbox": [ + 50, + 363, + 295, + 496 + ], + "type": "inline_equation", + "content": "a" + }, + { + "bbox": [ + 50, + 363, + 295, + 496 + ], + "type": "text", + "content": ", a financial expert produced a score between 0 and 3 by manually checking whether " + }, + { + "bbox": [ + 50, + 363, + 295, + 496 + ], + "type": "inline_equation", + "content": "a" + }, + { + "bbox": [ + 50, + 363, + 295, + 496 + ], + "type": "text", + "content": " satisfied each of the three investment criteria contained in " + }, + { + "bbox": [ + 50, + 363, + 295, + 496 + ], + "type": "inline_equation", + "content": "i^{pref}" + }, + { + "bbox": [ + 50, + 363, + 295, + 496 + ], + "type": "text", + "content": ". A ground-truth ranking was produced by sorting the assets by the expert scores. We show an example of the ranking construction in Figure 2. During evaluation, the closer the participant ranking is to the ranking produced by expert judgments, the better the LLM-advisor performed." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 50, + 502, + 295, + 568 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 502, + 295, + 568 + ], + "spans": [ + { + "bbox": [ + 50, + 502, + 295, + 568 + ], + "type": "text", + "content": "Baseline Prompt: As we are working with an LLM-advisor and the nature of financial information-seeking is time-sensitive, we need to provide any information that might change over time to the LLM within the prompt. 
As such, for each asset " + }, + { + "bbox": [ + 50, + 502, + 295, + 568 + ], + "type": "inline_equation", + "content": "a" + }, + { + "bbox": [ + 50, + 502, + 295, + 568 + ], + "type": "text", + "content": ", we pre-prepared a standard asset descriptor block after consulting with a financial expert, containing:" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 67, + 570, + 294, + 647 + ], + "type": "list", + "angle": 0, + "index": 11, + "blocks": [ + { + "bbox": [ + 67, + 570, + 293, + 590 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 570, + 293, + 590 + ], + "spans": [ + { + "bbox": [ + 67, + 570, + 293, + 590 + ], + "type": "text", + "content": "Stock Prices: We collect monthly stock prices from 2023 using Yahoo! Finance.2" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 67, + 592, + 294, + 613 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 592, + 294, + 613 + ], + "spans": [ + { + "bbox": [ + 67, + 592, + 294, + 613 + ], + "type": "text", + "content": "- Business Summary: We gather each company's business overview from Yahoo! Finance." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 67, + 614, + 294, + 647 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 614, + 294, + 647 + ], + "spans": [ + { + "bbox": [ + 67, + 614, + 294, + 647 + ], + "type": "text", + "content": "- Recent Performance and Key Financial Indicators (e.g., EPS): We obtain earnings conference call transcripts3 from Seeking Alpha for the last quarter of 2023." 
+ } + ] + } + ], + "index": 10 + } + ], + "sub_type": "text" + }, + { + "type": "image", + "bbox": [ + 323, + 83, + 549, + 256 + ], + "blocks": [ + { + "bbox": [ + 323, + 83, + 549, + 256 + ], + "lines": [ + { + "bbox": [ + 323, + 83, + 549, + 256 + ], + "spans": [ + { + "bbox": [ + 323, + 83, + 549, + 256 + ], + "type": "image", + "image_path": "fd66e0a7cc774da4caaecdf2f74a7c28e16bf0343f962b2806eee36f86cbf795.jpg" + } + ] + } + ], + "index": 16, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 374, + 266, + 499, + 277 + ], + "lines": [ + { + "bbox": [ + 374, + 266, + 499, + 277 + ], + "spans": [ + { + "bbox": [ + 374, + 266, + 499, + 277 + ], + "type": "text", + "content": "Figure 3: User study structure." + } + ] + } + ], + "index": 17, + "angle": 0, + "type": "image_caption" + } + ], + "index": 16 + }, + { + "bbox": [ + 314, + 282, + 559, + 326 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 282, + 559, + 326 + ], + "spans": [ + { + "bbox": [ + 314, + 282, + 559, + 326 + ], + "type": "text", + "content": "The advisor using this prompt acts as our baseline for the advisory discussion study. We augment this baseline with additional context and instructions to form two additional experimental scenarios, discussed below:" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 313, + 332, + 559, + 399 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 332, + 559, + 399 + ], + "spans": [ + { + "bbox": [ + 313, + 332, + 559, + 399 + ], + "type": "text", + "content": "+Personalization: As discussed earlier, one of the core roles of the financial advisor is to personalize to the individual customer, based on their financial situation, needs, and preferences. 
To enable the LLM-advisor to personalize for the user, we integrate the generated profile from the preference elicitation (Stage 1) " + }, + { + "bbox": [ + 313, + 332, + 559, + 399 + ], + "type": "inline_equation", + "content": "i_u^{LLM}" + }, + { + "bbox": [ + 313, + 332, + 559, + 399 + ], + "type": "text", + "content": " into the prompt. We represent each preference as a series of short sentences." + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 313, + 403, + 559, + 502 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 403, + 559, + 502 + ], + "spans": [ + { + "bbox": [ + 313, + 403, + 559, + 502 + ], + "type": "text", + "content": "+Personality: In Section 2.2 we discussed how human financial advisors provide emotional support as well as financial advice. While it is unlikely that an LLM-advisor could do this as well as a human (it lacks both emotional intelligence and non-conversational clues to the customer's mental state [39]) it might be possible to provide a better end-user experience by directing the LLM-advisor to adopt a personality. As noted in Section 2 it is possible to do this via prompt engineering, such as instructing the LLM to take on the traits of one or more of the Big-Five personality types [23]." 
+ } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 313, + 502, + 558, + 546 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 502, + 558, + 546 + ], + "spans": [ + { + "bbox": [ + 313, + 502, + 558, + 546 + ], + "type": "text", + "content": "As we are performing a user study with humans, it would be impractical to exhaustively test every combination of personality types, hence as an initial investigation we experiment with two distinct personality profiles [32]:" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 331, + 549, + 559, + 592 + ], + "type": "list", + "angle": 0, + "index": 24, + "blocks": [ + { + "bbox": [ + 331, + 549, + 559, + 570 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 331, + 549, + 559, + 570 + ], + "spans": [ + { + "bbox": [ + 331, + 549, + 559, + 570 + ], + "type": "text", + "content": "- Extroverted: High in extroversion, agreeableness, and openness; low in conscientiousness and neuroticism." + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 331, + 571, + 559, + 592 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 331, + 571, + 559, + 592 + ], + "spans": [ + { + "bbox": [ + 331, + 571, + 559, + 592 + ], + "type": "text", + "content": "- Conscientious: Low in extroversion, agreeableness, and openness; high in conscientiousness and neuroticism." + } + ] + } + ], + "index": 23 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 313, + 594, + 559, + 683 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 594, + 559, + 683 + ], + "spans": [ + { + "bbox": [ + 313, + 594, + 559, + 683 + ], + "type": "text", + "content": "We adopted the prompting method from Jiang et al. (2024) to assign a Big Five personality trait to the LLM agent [14], choosing it for its simplicity and effectiveness among various proposed approaches for embedding personality in LLMs (including both prompting and fine-tuning) [13, 14, 31]. 
To ensure a high standard of professionalism and accurate representation of the intended personality, we consulted financial professionals to review the texts generated by LLMs adopting both personas." + } + ] + } + ], + "index": 25 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 52, + 60, + 173, + 68 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 60, + 173, + 68 + ], + "spans": [ + { + "bbox": [ + 52, + 60, + 173, + 68 + ], + "type": "text", + "content": "SIGIR 2025, July 13-18, 2018, Padua, Italy" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 507, + 60, + 558, + 69 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 507, + 60, + 558, + 69 + ], + "spans": [ + { + "bbox": [ + 507, + 60, + 558, + 69 + ], + "type": "text", + "content": "Takayanagi et al." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 50, + 653, + 295, + 670 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 653, + 295, + 670 + ], + "spans": [ + { + "bbox": [ + 50, + 653, + 295, + 670 + ], + "type": "text", + "content": "These were manually selected, however in a production environment these might be produced by an asset recommendation system." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 50, + 670, + 295, + 695 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 670, + 295, + 695 + ], + "spans": [ + { + "bbox": [ + 50, + 670, + 295, + 695 + ], + "type": "text", + "content": "2The scenario for the financial advising of our user study is set to December 30, 2023. By basing our experiment at the end of 2023, we avoid the problem of data contamination [28]." 
+ } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 50, + 694, + 294, + 720 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 694, + 294, + 720 + ], + "spans": [ + { + "bbox": [ + 50, + 694, + 294, + 720 + ], + "type": "text", + "content": "3Earnings conference calls, hosted by publicly traded companies, discuss key aspects of their earnings reports and future goals with financial analysts and investors, thus covering critical financial indicators and recent performance insights [24]. These" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 314, + 691, + 559, + 710 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 691, + 559, + 710 + ], + "spans": [ + { + "bbox": [ + 314, + 691, + 559, + 710 + ], + "type": "text", + "content": "transcripts cover significant financial indicators and provide explanations of recent performance." + } + ] + } + ], + "index": 26 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 3 + }, + { + "para_blocks": [ + { + "bbox": [ + 51, + 83, + 183, + 96 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 83, + 183, + 96 + ], + "spans": [ + { + "bbox": [ + 51, + 83, + 183, + 96 + ], + "type": "text", + "content": "3.4 Experimental Design" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 50, + 99, + 294, + 175 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 99, + 294, + 175 + ], + "spans": [ + { + "bbox": [ + 50, + 99, + 294, + 175 + ], + "type": "text", + "content": "In our experiment, we conducted two studies: a personalization study (for RQ2) and an advisor persona study (for RQ3). In the personalization study, participants compared a non-personalized (Baseline) advisor with a personalized (+Personalized) version. In the advisor persona study, they compared different LLM-advisor personality types (+Extroverted vs. +Conscientious). Participants are randomly assigned to one of these two studies." 
+ } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 51, + 175, + 295, + 198 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 175, + 295, + 198 + ], + "spans": [ + { + "bbox": [ + 51, + 175, + 295, + 198 + ], + "type": "text", + "content": "Figure 3 shows the structure of our user study for a single participant, comprising seven steps:" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 61, + 209, + 308, + 597 + ], + "type": "list", + "angle": 0, + "index": 11, + "blocks": [ + { + "bbox": [ + 61, + 209, + 308, + 253 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 61, + 209, + 308, + 253 + ], + "spans": [ + { + "bbox": [ + 61, + 209, + 308, + 253 + ], + "type": "text", + "content": "(1) Participant Training: Participants are given a general overview of the user study and given instructions on their expected roles during preference elicitation, advisory discussions, asset ranking, and advisor assessment." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 61, + 253, + 295, + 286 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 61, + 253, + 295, + 286 + ], + "spans": [ + { + "bbox": [ + 61, + 253, + 295, + 286 + ], + "type": "text", + "content": "(2) Investor Profile Allocation: The user " + }, + { + "bbox": [ + 61, + 253, + 295, + 286 + ], + "type": "inline_equation", + "content": "u" + }, + { + "bbox": [ + 61, + 253, + 295, + 286 + ], + "type": "text", + "content": " is randomly allocated one of the investor profiles (See Section 3.1) that they will follow. Each profile is assigned to 42 participants." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 62, + 286, + 295, + 351 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 62, + 286, + 295, + 351 + ], + "spans": [ + { + "bbox": [ + 62, + 286, + 295, + 351 + ], + "type": "text", + "content": "(3) Preference Elicitation (Stage 1): The participant interacts with the LLM-advisor as if they were a new investor. 
The conversation ends once the LLM-advisor determines that they know enough about the investor to personalize for them. The median time spent on preference elicitation was 5 minutes and 11 seconds." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 62, + 353, + 295, + 421 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 62, + 353, + 295, + 421 + ], + "spans": [ + { + "bbox": [ + 62, + 353, + 295, + 421 + ], + "type": "text", + "content": "(4) Response Summarization: Given the aggregator of user responses " + }, + { + "bbox": [ + 62, + 353, + 295, + 421 + ], + "type": "inline_equation", + "content": "R_{i}^{u}" + }, + { + "bbox": [ + 62, + 353, + 295, + 421 + ], + "type": "text", + "content": ", we instruct an LLM to generate an investor profile " + }, + { + "bbox": [ + 62, + 353, + 295, + 421 + ], + "type": "inline_equation", + "content": "i_{u}^{LLM}" + }, + { + "bbox": [ + 62, + 353, + 295, + 421 + ], + "type": "text", + "content": ". For each investor preference in " + }, + { + "bbox": [ + 62, + 353, + 295, + 421 + ], + "type": "inline_equation", + "content": "i^{pref}" + }, + { + "bbox": [ + 62, + 353, + 295, + 421 + ], + "type": "text", + "content": ", if there is any relevant information in the responses " + }, + { + "bbox": [ + 62, + 353, + 295, + 421 + ], + "type": "inline_equation", + "content": "R_{i}^{u}" + }, + { + "bbox": [ + 62, + 353, + 295, + 421 + ], + "type": "text", + "content": ", that information is included in " + }, + { + "bbox": [ + 62, + 353, + 295, + 421 + ], + "type": "inline_equation", + "content": "i_{u}^{LLM}" + }, + { + "bbox": [ + 62, + 353, + 295, + 421 + ], + "type": "text", + "content": ". Otherwise, " + }, + { + "bbox": [ + 62, + 353, + 295, + 421 + ], + "type": "inline_equation", + "content": "i_{u}^{LLM}" + }, + { + "bbox": [ + 62, + 353, + 295, + 421 + ], + "type": "text", + "content": " indicates that no relevant information is available for that specific preference." 
+ } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 62, + 421, + 295, + 541 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 62, + 421, + 295, + 541 + ], + "spans": [ + { + "bbox": [ + 62, + 421, + 295, + 541 + ], + "type": "text", + "content": "(5) Advisory Discussion (Stage 2): To simplify the conversation flow we have the participant hold separate conversations with the LLM-advisor for each asset they might invest in. The LLM-advisor is provided with context about the current asset (see Section 3.3), and depending on the experimental scenario, optionally personalization information (step 4 output) and/or a target personality context statement. Each conversation continues until the user is satisfied that they have enough information to rate the asset. The order in which the assets are discussed is randomly assigned to avoid position bias." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 62, + 541, + 295, + 597 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 62, + 541, + 295, + 597 + ], + "spans": [ + { + "bbox": [ + 62, + 541, + 295, + 597 + ], + "type": "text", + "content": "(6) Asset Ranking and Feedback: Participants rank all the stocks (four in total) discussed in the advisory session according to their desire to invest in each. They also assess the advisor they interacted with using a 7-point Likert scale for the items listed in Table 1 (see Section 4)." + } + ] + } + ], + "index": 10 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 50, + 609, + 295, + 665 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 609, + 295, + 665 + ], + "spans": [ + { + "bbox": [ + 50, + 609, + 295, + 665 + ], + "type": "text", + "content": "To enable more effective pair-wise comparison of LLM-advisor variants, we have each participant test two variants per study. If the user has only tested one variant at this point, then they repeat the user study (starting at step 2) with the second variant. 
The order in which participants experience each variant is randomly assigned." + } + ] + } + ], + "index": 12 + }, + { + "type": "table", + "bbox": [ + 317, + 115, + 559, + 208 + ], + "blocks": [ + { + "bbox": [ + 315, + 83, + 560, + 105 + ], + "lines": [ + { + "bbox": [ + 315, + 83, + 560, + 105 + ], + "spans": [ + { + "bbox": [ + 315, + 83, + 560, + 105 + ], + "type": "text", + "content": "Table 1: Operational definitions used in the advisor assessment questionnaire for all response dimensions." + } + ] + } + ], + "index": 14, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 317, + 115, + 559, + 208 + ], + "lines": [ + { + "bbox": [ + 317, + 115, + 559, + 208 + ], + "spans": [ + { + "bbox": [ + 317, + 115, + 559, + 208 + ], + "type": "table", + "html": "
Response DimensionOperational Definition
Perceived Personalization [16]The advisor understands my needs.
Emotional Trust [16]I feel content about relying on this advisor for my decisions.
Trust in Competence [16]The advisor has good knowledge of the stock.
Intention to Use [16]I am willing to use this advisor as an aid to help with my decision about which stock to purchase.
Perceived Usefulness [25]The advisor gave me good suggestions.
Overall Satisfaction [25]Overall, I am satisfied with the advisor.
Information Provision [38]The advisor provides the financial knowledge needed.
", + "image_path": "8d963780ab0d5919fae3ad330b87890337b19b348fbed38c53dc36c9ddfcde88.jpg" + } + ] + } + ], + "index": 15, + "angle": 0, + "type": "table_body" + } + ], + "index": 15 + }, + { + "bbox": [ + 314, + 217, + 558, + 239 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 217, + 558, + 239 + ], + "spans": [ + { + "bbox": [ + 314, + 217, + 558, + 239 + ], + "type": "text", + "content": "In our experiments, we use Llama-3.1 8B as the background model for all our LLM-advisor variants." + }, + { + "bbox": [ + 314, + 217, + 558, + 239 + ], + "type": "inline_equation", + "content": "^4" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 315, + 249, + 403, + 262 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 249, + 403, + 262 + ], + "spans": [ + { + "bbox": [ + 315, + 249, + 403, + 262 + ], + "type": "text", + "content": "3.5 Participants" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 313, + 263, + 560, + 439 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 263, + 560, + 439 + ], + "spans": [ + { + "bbox": [ + 313, + 263, + 560, + 439 + ], + "type": "text", + "content": "We recruited 64 participants from the authors' affiliated university for our study: 32 participants for the personalization study and 32 participants for the advisor persona study, utilizing the university's online platform and blackboard for recruitment. Participants were required to be fluent in English, over 18 years old, and have an interest in finance and investment, mirroring the target demographic of our system's users. After excluding invalid data, 29 participants remained in the personalization study and 31 in the advisor persona study. We conducted a power analysis using the Wilcoxon signed-rank test for matched pairs, with the experimental conditions as the independent variable and users' response to the advisor assessment questionnaire as the dependent variable [29]. 
The analysis determined that 29 participants are needed to observe a statistically significant effect on user-perceived quality. Our recruitment criteria and compensation (£10/hour) for approximately one hour of participation were approved by our organization's ethical board." + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 315, + 449, + 496, + 460 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 449, + 496, + 460 + ], + "spans": [ + { + "bbox": [ + 315, + 449, + 496, + 460 + ], + "type": "text", + "content": "4 Evaluation Metrics and Statistics" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 313, + 463, + 559, + 497 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 463, + 559, + 497 + ], + "spans": [ + { + "bbox": [ + 313, + 463, + 559, + 497 + ], + "type": "text", + "content": "In this section we discuss how we quantify effectiveness for the preference elicitation and advisory discussion stages, respectively, in addition to summarizing dataset statistics for each." 
+ } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 315, + 506, + 534, + 519 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 506, + 534, + 519 + ], + "spans": [ + { + "bbox": [ + 315, + 506, + 534, + 519 + ], + "type": "text", + "content": "4.1 Preference Elicitation Metrics (Stage 1)" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 313, + 521, + 559, + 676 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 521, + 559, + 676 + ], + "spans": [ + { + "bbox": [ + 313, + 521, + 559, + 676 + ], + "type": "text", + "content": "To evaluate the quality of the first preference elicitation stage, we want to measure how well the LLM-advisor has captured the investor preferences as defined in the investor profile " + }, + { + "bbox": [ + 313, + 521, + 559, + 676 + ], + "type": "inline_equation", + "content": "i" + }, + { + "bbox": [ + 313, + 521, + 559, + 676 + ], + "type": "text", + "content": " (see Section 3.1). Each investor profile " + }, + { + "bbox": [ + 313, + 521, + 559, + 676 + ], + "type": "inline_equation", + "content": "i \\in I" + }, + { + "bbox": [ + 313, + 521, + 559, + 676 + ], + "type": "text", + "content": " defines key features of the investor, such as preferring high-growth stocks, or favoring regular payouts, denoted " + }, + { + "bbox": [ + 313, + 521, + 559, + 676 + ], + "type": "inline_equation", + "content": "i^{pref}" + }, + { + "bbox": [ + 313, + 521, + 559, + 676 + ], + "type": "text", + "content": ". 
We have three investor profiles (" + }, + { + "bbox": [ + 313, + 521, + 559, + 676 + ], + "type": "inline_equation", + "content": "|I| = 3" + }, + { + "bbox": [ + 313, + 521, + 559, + 676 + ], + "type": "text", + "content": ", with " + }, + { + "bbox": [ + 313, + 521, + 559, + 676 + ], + "type": "inline_equation", + "content": "10(n)" + }, + { + "bbox": [ + 313, + 521, + 559, + 676 + ], + "type": "text", + "content": " participants performing elicitation on " + }, + { + "bbox": [ + 313, + 521, + 559, + 676 + ], + "type": "inline_equation", + "content": "i_u^{LLM}" + }, + { + "bbox": [ + 313, + 521, + 559, + 676 + ], + "type": "text", + "content": " for each profile and each LLM variant, i.e. there are 120 elicitation attempts in total, with 30 attempts per LLM-advisor variant. Following the notation in Section 3, " + }, + { + "bbox": [ + 313, + 521, + 559, + 676 + ], + "type": "inline_equation", + "content": "i_u^{LLM}" + }, + { + "bbox": [ + 313, + 521, + 559, + 676 + ], + "type": "text", + "content": " in this case denotes a similar list of features to " + }, + { + "bbox": [ + 313, + 521, + 559, + 676 + ], + "type": "inline_equation", + "content": "i^{pref}" + }, + { + "bbox": [ + 313, + 521, + 559, + 676 + ], + "type": "text", + "content": " that LLM-advisor learned about the investor during conversation with a participant " + }, + { + "bbox": [ + 313, + 521, + 559, + 676 + ], + "type": "inline_equation", + "content": "u" + }, + { + "bbox": [ + 313, + 521, + 559, + 676 + ], + "type": "text", + "content": ", which we derive from a manual analysis of the elicitation output (i.e. what is produced by response summarization). 
Intuitively, the closer the features produced from" + } + ] + } + ], + "index": 22 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 51, + 60, + 248, + 69 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 60, + 248, + 69 + ], + "spans": [ + { + "bbox": [ + 51, + 60, + 248, + 69 + ], + "type": "text", + "content": "Are Generative AI Agents Effective Personalized Financial Advisors?" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 438, + 60, + 558, + 69 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 438, + 60, + 558, + 69 + ], + "spans": [ + { + "bbox": [ + 438, + 60, + 558, + 69 + ], + "type": "text", + "content": "SIGIR 2025, July 13-18, 2018, Padua, Italy" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 62, + 677, + 295, + 710 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 62, + 677, + 295, + 710 + ], + "spans": [ + { + "bbox": [ + 62, + 677, + 295, + 710 + ], + "type": "text", + "content": "(7) Exit Questionnaire: Once a pair of LLM-advisor variants have been tested, the user fills in an exit questionnaire that is designed to ask the overall experience in the user study." 
+ } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 314, + 683, + 560, + 710 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 683, + 560, + 710 + ], + "spans": [ + { + "bbox": [ + 314, + 683, + 560, + 710 + ], + "type": "text", + "content": "Further details about the LLM configuration, investor narratives, relevant scores, prompts and scripts for data analysis can be accessed at the following repository: https://github.com/TTsamurai/LLMAdvisor_supplement" + } + ] + } + ], + "index": 23 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 4 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 70, + 103, + 276, + 252 + ], + "blocks": [ + { + "bbox": [ + 51, + 83, + 294, + 94 + ], + "lines": [ + { + "bbox": [ + 51, + 83, + 294, + 94 + ], + "spans": [ + { + "bbox": [ + 51, + 83, + 294, + 94 + ], + "type": "text", + "content": "Table 2: General statistics of the collected conversation data." + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 70, + 103, + 276, + 252 + ], + "lines": [ + { + "bbox": [ + 70, + 103, + 276, + 252 + ], + "spans": [ + { + "bbox": [ + 70, + 103, + 276, + 252 + ], + "type": "table", + "html": "
Participants60
Time Period2024/10/24 ~ 2024/11/7
Total Turns10,008
Stage 1: Preference Elicitation
Total Turns1,788
Number of Sessions120
Avg. Turns/Session15.8
Avg. User Words/Turn9.8
Stage 2: Advisory Discussion
Total Turns8,220
Number of Sessions480
Avg. Turns/Session18.2
Avg. User Words/Turn13.0
", + "image_path": "a6b48ac0208aa2ebad6a9884585dd0814d5926805bfbac9e8cc672c73a856a67.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "table_body" + } + ], + "index": 3 + }, + { + "bbox": [ + 50, + 262, + 295, + 297 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 262, + 295, + 297 + ], + "spans": [ + { + "bbox": [ + 50, + 262, + 295, + 297 + ], + "type": "text", + "content": "any elicitation attempt " + }, + { + "bbox": [ + 50, + 262, + 295, + 297 + ], + "type": "inline_equation", + "content": "i_{u}^{LLM}" + }, + { + "bbox": [ + 50, + 262, + 295, + 297 + ], + "type": "text", + "content": " is to " + }, + { + "bbox": [ + 50, + 262, + 295, + 297 + ], + "type": "inline_equation", + "content": "i^{pref}" + }, + { + "bbox": [ + 50, + 262, + 295, + 297 + ], + "type": "text", + "content": ", the better the LLM-advisor is performing. To this end, we report elicitation accuracy for each investor profile, calculated as:" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 86, + 300, + 295, + 336 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 86, + 300, + 295, + 336 + ], + "spans": [ + { + "bbox": [ + 86, + 300, + 295, + 336 + ], + "type": "interline_equation", + "content": "\\text {E l i c i t a t i o n A c c u r a c y} (i) = \\frac {1}{n} \\sum_ {j = 1} ^ {n} \\frac {\\left| i _ {j} ^ {L L M} \\cap i ^ {p r e f} \\right|}{\\left| i ^ {p r e f} \\right|} \\tag {1}", + "image_path": "2a53765f62dcad92803105547e09853335cbdae1725255060f964dc9c83649c1.jpg" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 50, + 346, + 295, + 426 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 346, + 295, + 426 + ], + "spans": [ + { + "bbox": [ + 50, + 346, + 295, + 426 + ], + "type": "text", + "content": "Human Advisor: To provide a point of comparison, we also conduct a preference elicitation with a financial expert using the same prompt and instructions as the LLM. 
This allows us to evaluate how close LLMs are to a paid human advisor undertaking the same task. More specifically, for each investor profile, three participants engaged with this expert, who then produced a set of preferences " + }, + { + "bbox": [ + 50, + 346, + 295, + 426 + ], + "type": "inline_equation", + "content": "i_u^{Expert}" + }, + { + "bbox": [ + 50, + 346, + 295, + 426 + ], + "type": "text", + "content": ", which can be used instead of " + }, + { + "bbox": [ + 50, + 346, + 295, + 426 + ], + "type": "inline_equation", + "content": "i_u^{LLM}" + }, + { + "bbox": [ + 50, + 346, + 295, + 426 + ], + "type": "text", + "content": " in Equation 1." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 51, + 434, + 275, + 448 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 434, + 275, + 448 + ], + "spans": [ + { + "bbox": [ + 51, + 434, + 275, + 448 + ], + "type": "text", + "content": "4.2 Advisory Effectiveness Metrics (Stage 2)" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 50, + 449, + 295, + 628 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 449, + 295, + 628 + ], + "spans": [ + { + "bbox": [ + 50, + 449, + 295, + 628 + ], + "type": "text", + "content": "Ranking correlation (Spearman's Rho): In the second stage, we evaluate how well the LLM-advisor can support an investor to select financial assets that are suitable for them to invest in. Recall from Figure 3 that after a participant finishes discussing all assets with the LLM-advisor, they rank those assets " + }, + { + "bbox": [ + 50, + 449, + 295, + 628 + ], + "type": "inline_equation", + "content": "a \\in A_i" + }, + { + "bbox": [ + 50, + 449, + 295, + 628 + ], + "type": "text", + "content": " based on the likelihood they will invest in each, i.e. 
each participant " + }, + { + "bbox": [ + 50, + 449, + 295, + 628 + ], + "type": "inline_equation", + "content": "u" + }, + { + "bbox": [ + 50, + 449, + 295, + 628 + ], + "type": "text", + "content": " acting on a profile " + }, + { + "bbox": [ + 50, + 449, + 295, + 628 + ], + "type": "inline_equation", + "content": "i" + }, + { + "bbox": [ + 50, + 449, + 295, + 628 + ], + "type": "text", + "content": " we have an asset ranking " + }, + { + "bbox": [ + 50, + 449, + 295, + 628 + ], + "type": "inline_equation", + "content": "R(A_i, i_u)" + }, + { + "bbox": [ + 50, + 449, + 295, + 628 + ], + "type": "text", + "content": ". As illustrated in Figure 2, each investor profile " + }, + { + "bbox": [ + 50, + 449, + 295, + 628 + ], + "type": "inline_equation", + "content": "i" + }, + { + "bbox": [ + 50, + 449, + 295, + 628 + ], + "type": "text", + "content": " was derived from a ground truth set of investor preferences " + }, + { + "bbox": [ + 50, + 449, + 295, + 628 + ], + "type": "inline_equation", + "content": "i^{pref}" + }, + { + "bbox": [ + 50, + 449, + 295, + 628 + ], + "type": "text", + "content": ", which an expert used to create a ground truth ranking " + }, + { + "bbox": [ + 50, + 449, + 295, + 628 + ], + "type": "inline_equation", + "content": "R(A_i, i^{pref})" + }, + { + "bbox": [ + 50, + 449, + 295, + 628 + ], + "type": "text", + "content": ", i.e. the \"correct\" ranking of assets. Intuitively the closer the " + }, + { + "bbox": [ + 50, + 449, + 295, + 628 + ], + "type": "inline_equation", + "content": "R(A_i, i_u)" + }, + { + "bbox": [ + 50, + 449, + 295, + 628 + ], + "type": "text", + "content": " is to " + }, + { + "bbox": [ + 50, + 449, + 295, + 628 + ], + "type": "inline_equation", + "content": "R(A_i, i^{pref})" + }, + { + "bbox": [ + 50, + 449, + 295, + 628 + ], + "type": "text", + "content": ", the better the advisor is performing, as the participant was better able to distinguish suitable assets vs. unsuitable ones. 
Hence, to evaluate the effectiveness of the advisory task, we report the mean ranking correlation (Spearman's Rho) between " + }, + { + "bbox": [ + 50, + 449, + 295, + 628 + ], + "type": "inline_equation", + "content": "R(A_i, i_u)" + }, + { + "bbox": [ + 50, + 449, + 295, + 628 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 50, + 449, + 295, + 628 + ], + "type": "inline_equation", + "content": "R(A_i, i^{pref})" + }, + { + "bbox": [ + 50, + 449, + 295, + 628 + ], + "type": "text", + "content": " across participants " + }, + { + "bbox": [ + 50, + 449, + 295, + 628 + ], + "type": "inline_equation", + "content": "u" + }, + { + "bbox": [ + 50, + 449, + 295, + 628 + ], + "type": "text", + "content": " for each LLM-advisor." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 50, + 632, + 295, + 710 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 632, + 295, + 710 + ], + "spans": [ + { + "bbox": [ + 50, + 632, + 295, + 710 + ], + "type": "text", + "content": "Advisor Assessment Questionnaire: Lastly, we also gather qualitative data from each participant via a questionnaire. In particular, after ranking assets each participant, reports how they feel the LLM-advisor performed in terms of 7 dimensions, listed in Table 1, such as perceived usefulness, trust, and user satisfaction. We use this data later to evaluate how sensitive the user is to differences in the LLM-advisor." + } + ] + } + ], + "index": 9 + }, + { + "type": "table", + "bbox": [ + 317, + 137, + 559, + 213 + ], + "blocks": [ + { + "bbox": [ + 314, + 83, + 560, + 128 + ], + "lines": [ + { + "bbox": [ + 314, + 83, + 560, + 128 + ], + "spans": [ + { + "bbox": [ + 314, + 83, + 560, + 128 + ], + "type": "text", + "content": "Table 3: Stage 1 - Comparison of Elicitation Accuracy of an expert vs. different LLM-advisors for each investor profile. The best advisor is highlighted in bold. 
Arrows denote percentage increases " + }, + { + "bbox": [ + 314, + 83, + 560, + 128 + ], + "type": "inline_equation", + "content": "(\\uparrow)" + }, + { + "bbox": [ + 314, + 83, + 560, + 128 + ], + "type": "text", + "content": " or decreases " + }, + { + "bbox": [ + 314, + 83, + 560, + 128 + ], + "type": "inline_equation", + "content": "(\\downarrow)" + }, + { + "bbox": [ + 314, + 83, + 560, + 128 + ], + "type": "text", + "content": " compared to the expert." + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 317, + 137, + 559, + 213 + ], + "lines": [ + { + "bbox": [ + 317, + 137, + 559, + 213 + ], + "spans": [ + { + "bbox": [ + 317, + 137, + 559, + 213 + ], + "type": "table", + "html": "
Investor ProfileExpertLLM-Advisors
LLM+Extr.+Cons.Average
Growth-Oriented0.780.760.800.790.78→0.0%
Conservative-Income0.890.820.750.870.82↓7.8%
Risk-Taking0.890.480.600.550.53↓40.5%
Average0.850.690.700.730.70↓17.6%
", + "image_path": "d0bf440a8e47e435aae0fc2e57b40bd06a3e25f2e13d643069bc8f0b44f7c2b6.jpg" + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "table_body" + } + ], + "index": 11 + }, + { + "bbox": [ + 315, + 216, + 427, + 227 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 216, + 427, + 227 + ], + "spans": [ + { + "bbox": [ + 315, + 216, + 427, + 227 + ], + "type": "text", + "content": "4.3 Dataset Statistics" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 313, + 230, + 559, + 307 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 230, + 559, + 307 + ], + "spans": [ + { + "bbox": [ + 313, + 230, + 559, + 307 + ], + "type": "text", + "content": "Table 2 summarizes the statistics of the data collected during the two stages of our user study. Each conversation that a participant had with an LLM-advisor in either stage 1 or 2 is referred to as a session, e.g. during Stage 1, there were 3 investor profiles * 10 participants * 4 LLM-advisors, resulting in 120 sessions. Stage 2 has 4x the number of sessions, as there are four assets associated with each profile (" + }, + { + "bbox": [ + 313, + 230, + 559, + 307 + ], + "type": "inline_equation", + "content": "A_i" + }, + { + "bbox": [ + 313, + 230, + 559, + 307 + ], + "type": "text", + "content": ") to discuss with the LLM-advisor." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 313, + 307, + 559, + 373 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 307, + 559, + 373 + ], + "spans": [ + { + "bbox": [ + 313, + 307, + 559, + 373 + ], + "type": "text", + "content": "From Table 2 we observe that in contrast to other conversational tasks [36, 37], financial information-seeking appears to require more extended interactions. 
On average, preference elicitation involves 15 turns per session with 9.8 words per turn, whereas advisory discussions involve 18 turns per session with 13.0 words per turn, highlighting the overall complexity of the task." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 315, + 381, + 372, + 393 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 381, + 372, + 393 + ], + "spans": [ + { + "bbox": [ + 315, + 381, + 372, + 393 + ], + "type": "text", + "content": "5 Results" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 314, + 396, + 560, + 430 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 396, + 560, + 430 + ], + "spans": [ + { + "bbox": [ + 314, + 396, + 560, + 430 + ], + "type": "text", + "content": "In this work, we explore how to design conversational financial advisors that enhance both decision-making and positive experience. To achieve this, our user study is guided by 3 core research questions." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 331, + 431, + 558, + 496 + ], + "type": "list", + "angle": 0, + "index": 20, + "blocks": [ + { + "bbox": [ + 331, + 431, + 558, + 452 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 331, + 431, + 558, + 452 + ], + "spans": [ + { + "bbox": [ + 331, + 431, + 558, + 452 + ], + "type": "text", + "content": "- RQ1: Can LLM-advisors effectively elicit user preferences through conversation?" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 331, + 453, + 558, + 474 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 331, + 453, + 558, + 474 + ], + "spans": [ + { + "bbox": [ + 331, + 453, + 558, + 474 + ], + "type": "text", + "content": "- RQ2: Does personalization lead to better decisions and more positive advisor assessment?" 
+ } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 331, + 475, + 558, + 496 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 331, + 475, + 558, + 496 + ], + "spans": [ + { + "bbox": [ + 331, + 475, + 558, + 496 + ], + "type": "text", + "content": "- RQ3: Do different personality traits affect decision quality and advisor assessment?" + } + ] + } + ], + "index": 19 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 315, + 509, + 468, + 521 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 509, + 468, + 521 + ], + "spans": [ + { + "bbox": [ + 315, + 509, + 468, + 521 + ], + "type": "text", + "content": "5.1 RQ1: Elicitation accuracy" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 313, + 523, + 559, + 611 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 523, + 559, + 611 + ], + "spans": [ + { + "bbox": [ + 313, + 523, + 559, + 611 + ], + "type": "text", + "content": "We begin by examining how effective the LLM-advisors are at identifying investment preferences during conversations in Stage 1. Elicitation Accuracy is the primary metric, where we contrast the mean accuracy across 10 sessions in comparison to a human expert tackling the same task (see Section 4.1). Table 3 reports elicitation accuracy for each LLM-advisor and the Human Expert across investment profiles. Arrows denote percentage increases " + }, + { + "bbox": [ + 313, + 523, + 559, + 611 + ], + "type": "inline_equation", + "content": "(\\uparrow)" + }, + { + "bbox": [ + 313, + 523, + 559, + 611 + ], + "type": "text", + "content": " or decreases " + }, + { + "bbox": [ + 313, + 523, + 559, + 611 + ], + "type": "inline_equation", + "content": "(\\downarrow)" + }, + { + "bbox": [ + 313, + 523, + 559, + 611 + ], + "type": "text", + "content": " of the LLM-advisor compared to the expert." 
+ } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 313, + 611, + 559, + 666 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 611, + 559, + 666 + ], + "spans": [ + { + "bbox": [ + 313, + 611, + 559, + 666 + ], + "type": "text", + "content": "To set expectations, we first consider the performance of the expert in the first column in Table 3, as we might expect, the expert maintains consistently high performance across all profiles, averaging " + }, + { + "bbox": [ + 313, + 611, + 559, + 666 + ], + "type": "inline_equation", + "content": "85\\%" + }, + { + "bbox": [ + 313, + 611, + 559, + 666 + ], + "type": "text", + "content": " accuracy (random accuracy is " + }, + { + "bbox": [ + 313, + 611, + 559, + 666 + ], + "type": "inline_equation", + "content": "50\\%" + }, + { + "bbox": [ + 313, + 611, + 559, + 666 + ], + "type": "text", + "content": "). This forms an expectation of the performance ceiling for the task." + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 313, + 666, + 559, + 710 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 666, + 559, + 710 + ], + "spans": [ + { + "bbox": [ + 313, + 666, + 559, + 710 + ], + "type": "text", + "content": "Next, we compare the expert performance to each LLM-advisor. 
From the perspective of preference elicitation, there are three LLM-advisor configurations, those that use only the Baseline Prompt (denoted LLM) from the personalization study, and those that include" + } + ] + } + ], + "index": 24 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 52, + 60, + 173, + 69 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 60, + 173, + 69 + ], + "spans": [ + { + "bbox": [ + 52, + 60, + 173, + 69 + ], + "type": "text", + "content": "SIGIR 2025, July 13-18, 2018, Padua, Italy" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 507, + 60, + 558, + 69 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 507, + 60, + 558, + 69 + ], + "spans": [ + { + "bbox": [ + 507, + 60, + 558, + 69 + ], + "type": "text", + "content": "Takayanagi et al." + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 5 + }, + { + "para_blocks": [ + { + "bbox": [ + 50, + 84, + 294, + 162 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 84, + 294, + 162 + ], + "spans": [ + { + "bbox": [ + 50, + 84, + 294, + 162 + ], + "type": "text", + "content": "a defined personality (either extroverted, " + }, + { + "bbox": [ + 50, + 84, + 294, + 162 + ], + "type": "inline_equation", + "content": "+\\mathrm{Extr}" + }, + { + "bbox": [ + 50, + 84, + 294, + 162 + ], + "type": "text", + "content": ", or conscientious, " + }, + { + "bbox": [ + 50, + 84, + 294, + 162 + ], + "type": "inline_equation", + "content": "+\\mathrm{Cons}" + }, + { + "bbox": [ + 50, + 84, + 294, + 162 + ], + "type": "text", + "content": ".) from the advisor persona study. 
From Table 3, we observe that the LLM-advisor's performance is generally strong for growth-oriented, and conservative-income investors (with accuracy around " + }, + { + "bbox": [ + 50, + 84, + 294, + 162 + ], + "type": "inline_equation", + "content": "80\\%" + }, + { + "bbox": [ + 50, + 84, + 294, + 162 + ], + "type": "text", + "content": ") on average, which is similar to the human advisor. However, for the risk-taking investor profile, the LLM-advisor's elicitation accuracy was substantially lower " + }, + { + "bbox": [ + 50, + 84, + 294, + 162 + ], + "type": "inline_equation", + "content": "(-40.5\\%)" + }, + { + "bbox": [ + 50, + 84, + 294, + 162 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 50, + 163, + 295, + 370 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 163, + 295, + 370 + ], + "spans": [ + { + "bbox": [ + 50, + 163, + 295, + 370 + ], + "type": "text", + "content": "From a manual failure analysis, we observed the following trends that contribute to the performance gap with the human advisor, particularly for the risk-taking profile. First, it is notable that elicitation failures can originate from the investor (participant) rather than the LLM. Recall that one of the aspects that makes finance more challenging than domains like movie recommendation is that the \"user\" is inexpert, and so may give incorrect information during the conversation. Indeed, we observed cases where the participant confused concepts such as the difference between a growth and a value stock, as well as cyclical/non-cyclical assets. On the other side, preference hallucination is a core issue for the LLM-advisor. The LLM is a probabilistic token generator conditioned on the baseline prompt and prior conversation, and as a result, in some scenarios, the contextual content can override a statement by the investor. 
This type of error is more likely when the investor is unsure in their responses or when they provide contradictory statements. For instance, an investor expressing an interest in the consumer discretionary sector while simultaneously opting for non-cyclical stocks, despite consumer discretionary being inherently cyclical." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 50, + 375, + 295, + 495 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 375, + 295, + 495 + ], + "spans": [ + { + "bbox": [ + 50, + 375, + 295, + 495 + ], + "type": "text", + "content": "To answer RQ1, our results demonstrate that LLM-advisor's are able to elicit preferences from a user via conversation and that for 2/3's of the user profiles tested, elicitation accuracy was consistently equivalent or close to that of an expert human advisor. However, we observed a clear failure mode when testing the risk-taking profile, where misunderstandings by the investors and hallucinations within the LLM compound to result in accuracy that is close to random. Overall, we consider this a promising result, as the majority of the time it is effective, and the failure mode observed might be rectified by better context crafting and the addition of contradiction detection; both directions for future research." 
+ } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 51, + 506, + 263, + 518 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 506, + 263, + 518 + ], + "spans": [ + { + "bbox": [ + 51, + 506, + 263, + 518 + ], + "type": "text", + "content": "5.2 RQ2: Effectiveness of personalization" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 50, + 521, + 295, + 641 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 521, + 295, + 641 + ], + "spans": [ + { + "bbox": [ + 50, + 521, + 295, + 641 + ], + "type": "text", + "content": "Having shown that automatic preference elicitation is possible, we now examine stage 2 of our study, namely the advisory discussions. Given the inherently personalized nature of financial advice, we expect that the customer preferences obtained during stage 1 will be key to enabling LLM-advisors to provide effective investment advice. Hence, in this section, we compare the performance of an LLM-advisor using only the Baseline Prompt to one that includes the preferences obtained during stage 1 (+Personalized). However, as we observed that preference elicitation is not always successful, we also examine what effect elicitation performance has on the LLM-advisor." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 50, + 648, + 295, + 681 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 648, + 295, + 681 + ], + "spans": [ + { + "bbox": [ + 50, + 648, + 295, + 681 + ], + "type": "text", + "content": "5.2.1 Non-personalized Decision-making Effectiveness: We initially establish how effective the LLM-advisor is without any information regarding the investor. 
LLM-advisor effectiveness is measured" + } + ] + } + ], + "index": 7 + }, + { + "type": "table", + "bbox": [ + 316, + 171, + 558, + 247 + ], + "blocks": [ + { + "bbox": [ + 314, + 83, + 560, + 160 + ], + "lines": [ + { + "bbox": [ + 314, + 83, + 560, + 160 + ], + "spans": [ + { + "bbox": [ + 314, + 83, + 560, + 160 + ], + "type": "text", + "content": "Table 4: Investor decision-making effectiveness, expressed as the Spearman's Rho correlation between the investor's asset ranking and the expert asset ranking (higher is better). " + }, + { + "bbox": [ + 314, + 83, + 560, + 160 + ], + "type": "inline_equation", + "content": "\\dagger" + }, + { + "bbox": [ + 314, + 83, + 560, + 160 + ], + "type": "text", + "content": " indicates statistical improvements (Welch's t-test with " + }, + { + "bbox": [ + 314, + 83, + 560, + 160 + ], + "type": "inline_equation", + "content": "p<0.05" + }, + { + "bbox": [ + 314, + 83, + 560, + 160 + ], + "type": "text", + "content": ") over the not personalized baseline, while " + }, + { + "bbox": [ + 314, + 83, + 560, + 160 + ], + "type": "inline_equation", + "content": "\\S" + }, + { + "bbox": [ + 314, + 83, + 560, + 160 + ], + "type": "text", + "content": " indicates significant differences between cases with successful and unsuccessful preference elicitations." + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 316, + 171, + 558, + 247 + ], + "lines": [ + { + "bbox": [ + 316, + 171, + 558, + 247 + ], + "spans": [ + { + "bbox": [ + 316, + 171, + 558, + 247 + ], + "type": "table", + "html": "
Advisor ConfigInvestor vs. Expert (Spearman's Rho)
PersonalizationPersonalityAllPreference Elicitation
SuccessfulUnsuccessful
BaselineNone0.110--
+PersonalizedNone0.3100.481†§-0.228
+Personalized+Extroverted0.1220.243§-0.286
+Personalized+Conscientious0.260.365-0.025
", + "image_path": "c4b270fbbcb62fddb142afbc026f33c4b10b865d46fa8bd61bc31254260efa7f.jpg" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "table_body" + } + ], + "index": 9 + }, + { + "bbox": [ + 313, + 252, + 559, + 340 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 252, + 559, + 340 + ], + "spans": [ + { + "bbox": [ + 313, + 252, + 559, + 340 + ], + "type": "text", + "content": "based on how well the investor was able to rank the assets discussed by suitability to them. The primary metric is average Spearman's Rho correlation between the investor ranking and the ground truth ranking (see Section 4.2), reported in Table 4 row 1. As we expect, baseline advisory performance is low, with only a very weak positive correlation to the ground truth ranking of 0.11. This indicates that without further evidence, the LLM is not able to meaningfully guide the investor." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 313, + 346, + 560, + 456 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 346, + 560, + 456 + ], + "spans": [ + { + "bbox": [ + 313, + 346, + 560, + 456 + ], + "type": "text", + "content": "5.2.2 Personalized Decision-making Effectiveness: Having established our baseline, we now examine the impact that adding the investor preferences collected during stage 1 has, comparing Table 4 row 1 (baseline) to row 2 (personalized). As we anticipated, personalization is beneficial, with investor decision-making effectiveness increasing from 0.11 to 0.31 (average Spearman's Rho correlation to the expert ranking). However, this correlation is still weak, illustrating that while discussing assets with the LLM-advisor is better than no help at all, our participants are still struggling to evaluate the suitability of financial assets." 
+ } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 313, + 456, + 559, + 643 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 456, + 559, + 643 + ], + "spans": [ + { + "bbox": [ + 313, + 456, + 559, + 643 + ], + "type": "text", + "content": "This correlation is an average over all the participants in the user study, regardless of how effective their preference elicitation was in stage 1. Hence, we might ask whether the low correlation is due to the LLM-advisor being confused by poor preference elicitation data. To explore this, Table 4 also reports investor decision-making effectiveness stratified based on whether stage 1 was successful (column 4) or not (column 5). As expected, we see a statistically significant increase in investor decision-making effectiveness when preference elicitation was successful when compared to non-personalized sessions (0.481 vs. 0.110). More concerningly, we also see the LLM-advisor has a strong negative influence on the investors' decision-making capability if preference elicitation fails, as illustrated by the negative correlations with the expert in column 5. This result highlights both that effective preference elicitation is crucial, but also that the LLM-advisor can easily influence the investor into making poor decisions, as the human is heavily reliant on the agent to navigate the relatively unfamiliar financial information space." 
+ } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 314, + 649, + 559, + 682 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 649, + 559, + 682 + ], + "spans": [ + { + "bbox": [ + 314, + 649, + 559, + 682 + ], + "type": "text", + "content": "5.2.3 Participant Assessment of the Advisor: So far we have demonstrated that there is a large difference between a non-personalized LLM-advisor and a personalized one, in terms of how they can" + } + ] + } + ], + "index": 13 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 51, + 60, + 248, + 69 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 60, + 248, + 69 + ], + "spans": [ + { + "bbox": [ + 51, + 60, + 248, + 69 + ], + "type": "text", + "content": "Are Generative AI Agents Effective Personalized Financial Advisors?" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 438, + 60, + 558, + 69 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 438, + 60, + 558, + 69 + ], + "spans": [ + { + "bbox": [ + 438, + 60, + 558, + 69 + ], + "type": "text", + "content": "SIGIR 2025, July 13-18, 2018, Padua, Italy" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 50, + 691, + 295, + 710 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 691, + 295, + 710 + ], + "spans": [ + { + "bbox": [ + 50, + 691, + 295, + 710 + ], + "type": "text", + "content": "5Note we cannot have a personalized variant here, as the personalization evidence is derived from this stage." 
+ } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 314, + 691, + 558, + 710 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 691, + 558, + 710 + ], + "spans": [ + { + "bbox": [ + 314, + 691, + 558, + 710 + ], + "type": "text", + "content": "6We define that an elicitation session is successful if more than 50% of the investor's preferences were correctly captured" + } + ] + } + ], + "index": 15 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 6 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 77, + 164, + 535, + 274 + ], + "blocks": [ + { + "bbox": [ + 50, + 83, + 560, + 160 + ], + "lines": [ + { + "bbox": [ + 50, + 83, + 560, + 160 + ], + "spans": [ + { + "bbox": [ + 50, + 83, + 560, + 160 + ], + "type": "text", + "content": "Table 5: Average participant users' response to advisor assessment questionnaire under different advisor conditions. Columns labeled with advisor condition (Baseline, +Pers., +Cons., +Extr.) contain a 7-point Likert scale (higher is better). \"p\" column contains Wilcoxon signed-rank test " + }, + { + "bbox": [ + 50, + 83, + 560, + 160 + ], + "type": "inline_equation", + "content": "p" + }, + { + "bbox": [ + 50, + 83, + 560, + 160 + ], + "type": "text", + "content": "-values for (RQ2) Baseline vs. +Personalized (Pers.), and (RQ3) +Conscientious (Cons.) vs. +Extroverted (Extr), for both the full data (All) and the subset where the elicitation accuracy is above 0.5. \"Successful Elicitation\" refers to the subset where elicitation accuracy was ≥ 0.5. For RQ2, this subset consists of pairs for which +Pers elicitation is successful, while for RQ3, it consists of pairs for which both +Extr and +Cons elicitation are successful. 
Boldface indicates significant effects with † for " + }, + { + "bbox": [ + 50, + 83, + 560, + 160 + ], + "type": "inline_equation", + "content": "p < 0.1" + }, + { + "bbox": [ + 50, + 83, + 560, + 160 + ], + "type": "text", + "content": " and ‡ for " + }, + { + "bbox": [ + 50, + 83, + 560, + 160 + ], + "type": "inline_equation", + "content": "p < 0.05" + }, + { + "bbox": [ + 50, + 83, + 560, + 160 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 77, + 164, + 535, + 274 + ], + "lines": [ + { + "bbox": [ + 77, + 164, + 535, + 274 + ], + "spans": [ + { + "bbox": [ + 77, + 164, + 535, + 274 + ], + "type": "table", + "html": "
Response Dimension(RQ2) Baseline vs. +Personalized(RQ3) +Conscientious vs. +Extroverted
AllSuccessful ElicitationAllSuccessful Elicitation
Baseline+Pers.pBaseline+Pers.p+Cons.+Extr.p+Cons.+Extr.p
Perceived Personalization5.7595.7240.8385.7625.9050.7515.5005.5000.6635.5885.7060.941
Emotional Trust5.1035.2410.4465.1435.3330.5375.0385.1540.6004.7065.2350.034‡
Trust in Competence5.6905.6900.8175.8105.8570.7825.9626.0770.5386.0006.0001.000
Intention to Use5.3105.4830.5055.4295.7140.1664.8855.4620.005‡4.9415.5880.013‡
Perceived Usefulness5.2415.5170.1835.3815.8100.1945.4235.5380.4255.1765.1180.968
Overall Satisfaction5.3455.6900.1165.4295.8100.098†5.2695.5770.1795.1185.5290.244
Information Provision5.5175.9660.026‡5.7146.1430.053†5.6925.6540.9535.5885.7650.490
", + "image_path": "5e64b4ef44e612c46e88ed22abbd02c291273be03180e6dbc3ff63f2edfb84e5.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "table_body" + } + ], + "index": 3 + }, + { + "bbox": [ + 50, + 289, + 294, + 311 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 289, + 294, + 311 + ], + "spans": [ + { + "bbox": [ + 50, + 289, + 294, + 311 + ], + "type": "text", + "content": "alter the decision-making of the investor/participant. But can the participant tell the differences between them?" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 50, + 312, + 295, + 519 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 312, + 295, + 519 + ], + "spans": [ + { + "bbox": [ + 50, + 312, + 295, + 519 + ], + "type": "text", + "content": "Table 5 reports the aggregation of the qualitative data we collected from each participant after they finished interacting with each LLM-advisor in terms of 7 dimensions, where we start by focusing on the RQ2-All columns, i.e. comparing the baseline and personalized variants. The important observation to note here is that the participant preference scores for both variants are statistically indistinguishable, except under the quality of information provision criteria. This means that our participants cannot tell if the LLM-advisor is personalizing to them, and trust the worse agent just as much as the better one. Furthermore, if we consider the best case scenario where the preference elicitation was successful (RQ2 Successful Elicitation columns) we observe the same pattern, even though the difference between the baseline and the personalized variants in terms of the effect it has on the participant decision-making is more pronounced. 
This underlines one of the core risks of using LLM-advisors in the financial domain; since our users are inherently inexpert they lack the fundamental skills to judge to what extent the LLM is providing good advice, meaning that there is no safety net if the LLM makes a mistake." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 50, + 525, + 295, + 668 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 525, + 295, + 668 + ], + "spans": [ + { + "bbox": [ + 50, + 525, + 295, + 668 + ], + "type": "text", + "content": "To answer RQ2, our results show that a personalized LLM-advisor is able to provide useful financial advice when it has accurate information regarding the preferences of the investor. This is demonstrated by better decision-making capability by participants using the personalized advisor in comparison to the non-personalized one. However, we also identified two important challenges to adoption. First, the impact the LLM-advisor has is strongly tied to the quality of the preference elicitation data provided, where poor preference elicitation will cause the agent to actively direct the investor to the wrong assets. Second, while the participants were positive regarding the LLM-advisors across all questionnaire criteria, they were not able to consistently tell the difference between good and bad advisors; leading to an increased risk of humans acting on bad advice." 
+ } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 315, + 288, + 515, + 300 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 288, + 515, + 300 + ], + "spans": [ + { + "bbox": [ + 315, + 288, + 515, + 300 + ], + "type": "text", + "content": "5.3 RQ3: Effectiveness of personalities" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 313, + 303, + 560, + 456 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 303, + 560, + 456 + ], + "spans": [ + { + "bbox": [ + 313, + 303, + 560, + 456 + ], + "type": "text", + "content": "Once we have confirmed the utility of personalization for LLM-advisors, we now study the effect that the personality of the advisor has on users' financial information-seeking. As previous studies have shown [32], chatbot personality can affect the way humans interact with the chatbot, and therefore affect the effectiveness and perception of LLM-advisors. To understand whether personality affects LLM financial advisors, we compare two personalized LLM-advisors on which we have injected a pre-defined personality: an extroverted personality and a conscientious personality. While we could consider the personalized LLM-advisor discussed in Section 5.2 as a third distinct personality (the base LLM personality of the LLM), we shall not compare it with our personality-injected models, because different sets of participants were used in the personalization study and the advisor-persona study." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 313, + 464, + 559, + 540 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 464, + 559, + 540 + ], + "spans": [ + { + "bbox": [ + 313, + 464, + 559, + 540 + ], + "type": "text", + "content": "5.3.1 Decision-making Effectiveness: We first examine the impact of adding personality to the advisors on the decision-making process, by measuring the capacity of the participants to correctly rank the assets (as previously done in Section 5.2). 
As a primary metric, we again use the average Spearman's Rho correlation between the investor ranking and the ground truth ranking reported in Table 4 rows 3 (extroverted advisor) and row 4 (conscientious advisor)." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 313, + 541, + 559, + 650 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 541, + 559, + 650 + ], + "spans": [ + { + "bbox": [ + 313, + 541, + 559, + 650 + ], + "type": "text", + "content": "We first observe the results for the full set of participants in the user study. Interestingly, we observe a difference between the two advisors, with the conscientious LLM-advisor providing better guidance than the extroverted one (0.26 vs. 0.122). This observation is consistent when we restrict our analysis to those cases where the preference elicitation is successful. While, expectedly, the effectiveness of both advisors improves when the elicitation is successful (0.243 vs. 0.122 in the case of the extroverted advisor and 0.365 vs. 0.26 in the case of the conscientious one), the conscientious advisor has an advantage over the extroverted one (0.365 vs. 0.26)." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 314, + 651, + 559, + 683 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 651, + 559, + 683 + ], + "spans": [ + { + "bbox": [ + 314, + 651, + 559, + 683 + ], + "type": "text", + "content": "These results highlight that providing different personalities to an LLM-advisor can notably impact the capacity of the advisor to provide useful information to the investors." 
+ } + ] + } + ], + "index": 11 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 52, + 60, + 172, + 69 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 60, + 172, + 69 + ], + "spans": [ + { + "bbox": [ + 52, + 60, + 172, + 69 + ], + "type": "text", + "content": "SIGIR 2025, July 13-18, 2018, Padua, Italy" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 507, + 60, + 558, + 69 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 507, + 60, + 558, + 69 + ], + "spans": [ + { + "bbox": [ + 507, + 60, + 558, + 69 + ], + "type": "text", + "content": "Takayanagi et al." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 315, + 700, + 493, + 709 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 700, + 493, + 709 + ], + "spans": [ + { + "bbox": [ + 315, + 700, + 493, + 709 + ], + "type": "text", + "content": "Refer to Section 3.3 for a full description of each personality." + } + ] + } + ], + "index": 12 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 7 + }, + { + "para_blocks": [ + { + "bbox": [ + 53, + 85, + 294, + 150 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 85, + 294, + 150 + ], + "spans": [ + { + "bbox": [ + 53, + 85, + 294, + 150 + ], + "type": "text", + "content": "5.3.2 Participant Assessment of the Advisor: We have observed so far that the use of different personalities affects the user decision-making process. But how do these personalities affect the perception that users have of the LLM-advisor? We observe this in Table 5, in terms of the seven dimensions captured during the advisor assessment questionnaire." 
+ } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 53, + 150, + 294, + 270 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 150, + 294, + 270 + ], + "spans": [ + { + "bbox": [ + 53, + 150, + 294, + 270 + ], + "type": "text", + "content": "We first look at the RQ3-All columns, comparing the two personalities. Notably, for the majority of the dimensions, users barely distinguish between both systems. The only answer where we observe a statistically significant difference is the intention to use the system in the future. Surprisingly, despite providing worse guidance to the investor, participants expressed a higher interest in using the extroverted advisor than the conscientious one. When we limit our study to those participants who experienced a successful preference elicitation in both advisor variants, this issue is stressed, as those users also develop a significantly greater emotional trust with the extroverted advisor." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 53, + 271, + 294, + 315 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 271, + 294, + 315 + ], + "spans": [ + { + "bbox": [ + 53, + 271, + 294, + 315 + ], + "type": "text", + "content": "These observations are worrisome, as they reveal that the personality of a financial advisor cannot only affect the quality of the advice but also lead the investors to trust more on those systems providing worse advice." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 53, + 323, + 294, + 476 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 323, + 294, + 476 + ], + "spans": [ + { + "bbox": [ + 53, + 323, + 294, + 476 + ], + "type": "text", + "content": "5.3.3 Differences in language: To further understand how personalities affect financial advisory, we analyze the differences in the linguistic patterns provided by extroverted and conscientious advisors. 
Analyzing participants' reported overall experience from the exit questionnaires in the advisor persona study, over " + }, + { + "bbox": [ + 53, + 323, + 294, + 476 + ], + "type": "inline_equation", + "content": "20\\%" + }, + { + "bbox": [ + 53, + 323, + 294, + 476 + ], + "type": "text", + "content": " (7 of 31) described the extroverted advisor as clear, assertive, and cheerful while perceiving the conscientious advisor as straightforward, analytical, yet less confident. Therefore, to quantify the linguistic differences in the advisors, we conduct a financial sentiment analysis of the utterances generated by each advisor. For each utterance, we count the occurrences of positive, negative, and uncertain words from the Loughran and McDonald Financial Sentiment Dictionary [22]. We normalize these counts by the length of the sentences and average the results across all dialogues." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 53, + 477, + 294, + 641 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 477, + 294, + 641 + ], + "spans": [ + { + "bbox": [ + 53, + 477, + 294, + 641 + ], + "type": "text", + "content": "Figure 4 shows the results, showing the extroverted sentiment scores in blue, and the conscientious scores in orange. For the three sentiment dimensions, differences between advisors are statistically significant (Welch's t-test with " + }, + { + "bbox": [ + 53, + 477, + 294, + 641 + ], + "type": "inline_equation", + "content": "p < 0.01" + }, + { + "bbox": [ + 53, + 477, + 294, + 641 + ], + "type": "text", + "content": "). Figure 4 shows that extroverted advisors tend to use more positive language in their interactions, while conscientious advisors prefer negative and uncertain tones. 
Through manual analysis of the conversation, we observe that this results in the extroverted advisor focusing on the positive aspects of investments while overlooking serious drawbacks, whereas the conscientious advisor provides a more balanced view of the assets. Because of this, participants guided by conscientious advisors may make more well-informed financial decisions. Meanwhile, the positivity of the extroverted advisor seems more appreciated by the users, which is reflected in higher advisor assessment scores from the post-discussion questionnaire." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 53, + 647, + 294, + 679 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 647, + 294, + 679 + ], + "spans": [ + { + "bbox": [ + 53, + 647, + 294, + 679 + ], + "type": "text", + "content": "To answer RQ3, our results show that different personalities of a personalized LLM-advisor can affect the utility of the provided advice. This is demonstrated by the better decisions of the study" + } + ] + } + ], + "index": 7 + }, + { + "type": "image", + "bbox": [ + 335, + 83, + 538, + 193 + ], + "blocks": [ + { + "bbox": [ + 335, + 83, + 538, + 193 + ], + "lines": [ + { + "bbox": [ + 335, + 83, + 538, + 193 + ], + "spans": [ + { + "bbox": [ + 335, + 83, + 538, + 193 + ], + "type": "image", + "image_path": "709d4faf5630cdd0daf077acd764d4a59ec9401ffa6ebd54c3c945fdcecc8bbc.jpg" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 317, + 206, + 558, + 248 + ], + "lines": [ + { + "bbox": [ + 317, + 206, + 558, + 248 + ], + "spans": [ + { + "bbox": [ + 317, + 206, + 558, + 248 + ], + "type": "text", + "content": "Figure 4: Average sentiment scores by advisor personality (extroverted in light blue and conscientious in pastel orange) and category (Positive, Negative, and Uncertainty). Error bars indicate the standard deviation." 
+ } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_caption" + } + ], + "index": 8 + }, + { + "bbox": [ + 317, + 266, + 558, + 319 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 266, + 558, + 319 + ], + "spans": [ + { + "bbox": [ + 317, + 266, + 558, + 319 + ], + "type": "text", + "content": "participants when using an advisor with a conscientious personality than when using an advisor with an extroverted personality. Moreover, the personality of the advisor affects the perception of humans towards the system, and it has the risk of leading investors to further trust those systems that provide worse advice." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 317, + 334, + 390, + 344 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 334, + 390, + 344 + ], + "spans": [ + { + "bbox": [ + 317, + 334, + 390, + 344 + ], + "type": "text", + "content": "6 Conclusion" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 317, + 348, + 558, + 392 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 348, + 558, + 392 + ], + "spans": [ + { + "bbox": [ + 317, + 348, + 558, + 392 + ], + "type": "text", + "content": "In this paper, we have conducted a lab-based user study to examine how effective large language models are as financial advisors. We focus on three core challenges: preference elicitation, investment personalization, and advisor personality." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 317, + 392, + 558, + 522 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 392, + 558, + 522 + ], + "spans": [ + { + "bbox": [ + 317, + 392, + 558, + 522 + ], + "type": "text", + "content": "First, our analysis shows that LLMs are effective tools for preference elicitation through conversation. In a majority of cases, they are capable of obtaining investor's preferences with an accuracy close to or equivalent to that of an expert human advisor. 
However, there are some clear failure cases, as LLMs are vulnerable to contradictory statements and hallucinations, which, in the case of complex investor profiles, can decrease the accuracy of the elicitation to random levels. Although LLMs are promising for elicitation, in a complex domain like finance, investors do not always fully understand their own preferences (or they have difficulties expressing them). Therefore, future work should explore the development of LLM-advisors capable of resolving conflicting user needs." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 317, + 523, + 558, + 599 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 523, + 558, + 599 + ], + "spans": [ + { + "bbox": [ + 317, + 523, + 558, + 599 + ], + "type": "text", + "content": "Second, personalizing LLMs to provide investment advice can improve the decisions made by the investors, but only when the personalized LLM-advisor receives accurate information about the investor's preferences. If the preference elicitation is not successful, the agent actively directs the investors to the wrong assets on which to invest. This underscores how crucial a good preference elicitation is for providing useful financial advice." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 317, + 600, + 558, + 709 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 600, + 558, + 709 + ], + "spans": [ + { + "bbox": [ + 317, + 600, + 558, + 709 + ], + "type": "text", + "content": "Finally, our results suggest that investors are not necessarily aware of what constitutes good financial advice, and therefore, are vulnerable to acting on bad advice provided by LLMs. In the comparison between a non-personalized and a personalized LLM-advisor, although the personalized system led to better decisions, participants were unable to distinguish between the systems. 
More worryingly, when comparing two personalized advisors with extroverted and conscientious personalities, we observed that, even though the extroverted advisor provided lower-quality advice, participants trusted this advisor more than the conscientious one." + } + ] + } + ], + "index": 15 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 52, + 60, + 248, + 69 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 60, + 248, + 69 + ], + "spans": [ + { + "bbox": [ + 52, + 60, + 248, + 69 + ], + "type": "text", + "content": "Are Generative AI Agents Effective Personalized Financial Advisors?" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 438, + 60, + 558, + 69 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 438, + 60, + 558, + 69 + ], + "spans": [ + { + "bbox": [ + 438, + 60, + 558, + 69 + ], + "type": "text", + "content": "SIGIR 2025, July 13-18, 2018, Padua, Italy" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 52, + 700, + 251, + 709 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 700, + 251, + 709 + ], + "spans": [ + { + "bbox": [ + 52, + 700, + 251, + 709 + ], + "type": "text", + "content": "8Participants were unaware of the specific personas during the study." 
+ } + ] + } + ], + "index": 16 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 8 + }, + { + "para_blocks": [ + { + "bbox": [ + 50, + 84, + 295, + 228 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 84, + 295, + 228 + ], + "spans": [ + { + "bbox": [ + 50, + 84, + 295, + 228 + ], + "type": "text", + "content": "Our findings highlight that, while personalized LLM-advisors represent a promising research direction, their use in high-stakes domains like finance is not free of risks: due to the limitations of LLMs at capturing complex investment preferences, and the difficulty of investors to discern whether the advice they receive truly serves their interests, LLMs have a notable risk to drive investors to bad financial assets (leading not only to a low satisfaction but also to potentially large monetary losses). However, these drawbacks open interesting research directions not only from a system perspective, but also from a human-centered approach: automated advisory development where we do not just focus on improving the quality of automated systems to guide investors, but also on how the investors will adopt, trust and interact with these AI agents [6, 20]." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 52, + 256, + 108, + 267 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 256, + 108, + 267 + ], + "spans": [ + { + "bbox": [ + 52, + 256, + 108, + 267 + ], + "type": "text", + "content": "References" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 53, + 269, + 295, + 709 + ], + "type": "list", + "angle": 0, + "index": 21, + "blocks": [ + { + "bbox": [ + 56, + 269, + 295, + 286 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 269, + 295, + 286 + ], + "spans": [ + { + "bbox": [ + 56, + 269, + 295, + 286 + ], + "type": "text", + "content": "[1] James E. Allen, Curry I. Guinn, and Eric Horvitz. 1999. Mixed-initiative interaction. 
IEEE Intelligent Systems and their Applications 14, 5 (1999), 14-23." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 56, + 286, + 295, + 318 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 286, + 295, + 318 + ], + "spans": [ + { + "bbox": [ + 56, + 286, + 295, + 318 + ], + "type": "text", + "content": "[2] Ashay Argal, Siddharth Gupta, Ajay Modi, Pratik Pandey, Simon Shim, and Chang Choo. 2018. Intelligent travel chatbot for predictive recommendation in echo platform. In 2018 IEEE 8th Annual Computing and Communication Workshop and Conference (CCWC 2018). IEEE, 176-183." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 56, + 318, + 294, + 350 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 318, + 294, + 350 + ], + "spans": [ + { + "bbox": [ + 56, + 318, + 294, + 350 + ], + "type": "text", + "content": "[3] Andreas Bucher, Mateusz Dolata, Sven Eckhardt, Dario Staehelin, and Gerhard Schwabe. 2024. Talking to Multi-Party Conversational Agents in Advisory Services: Command-based vs. Conversational Interactions. Proceedings of the ACM on Human-Computer Interaction 8, GROUP (2024)." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 56, + 350, + 294, + 382 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 350, + 294, + 382 + ], + "spans": [ + { + "bbox": [ + 56, + 350, + 294, + 382 + ], + "type": "text", + "content": "[4] Wanling Cai, Yucheng Jin, and Li Chen. 2022. Impacts of personal characteristics on user trust in conversational recommender systems. In Proceedings of the 2022 CHI Conference on Human Factors in Computing Systems (CHI 2022). Article 489, 14 pages." 
+ } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 56, + 382, + 294, + 398 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 382, + 294, + 398 + ], + "spans": [ + { + "bbox": [ + 56, + 382, + 294, + 398 + ], + "type": "text", + "content": "[5] Gary Charness, Uri Gneezy, and Alex Imas. 2013. Experimental methods: Eliciting risk preferences. Journal of Economic Behavior & Organization 87 (2013), 43-51." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 56, + 398, + 294, + 414 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 398, + 294, + 414 + ], + "spans": [ + { + "bbox": [ + 56, + 398, + 294, + 414 + ], + "type": "text", + "content": "[6] Erin K. Chiou and John D. Lee. 2023. Trusting automation: Designing for responsivity and resilience. Human factors 65, 1 (2023), 137-165." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 56, + 414, + 294, + 445 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 414, + 294, + 445 + ], + "spans": [ + { + "bbox": [ + 56, + 414, + 294, + 445 + ], + "type": "text", + "content": "[7] Konstantina Christakopoulou, Filip Radlinski, and Katja Hofmann. 2016. Towards conversational recommender systems. In Proceedings of the 22nd ACM SIGKDD international conference on knowledge discovery and data mining (KDD 2016). 815-824." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 56, + 445, + 294, + 469 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 445, + 294, + 469 + ], + "spans": [ + { + "bbox": [ + 56, + 445, + 294, + 469 + ], + "type": "text", + "content": "[8] Berardina De Carolis, Marco de Gemmis, Pasquale Lops, and Giuseppe Palestra. 2017. Recognizing users feedback from non-verbal communicative acts in conversational recommender systems. Pattern Recognition Letters 99 (2017), 87-95." 
+ } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 56, + 469, + 294, + 501 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 469, + 294, + 501 + ], + "spans": [ + { + "bbox": [ + 56, + 469, + 294, + 501 + ], + "type": "text", + "content": "[9] Mateusz Dolata, Doris Agotai, Simon Schubiger, and Gerhard Schwabe. 2019. Pen-and-paper Rituals in Service Interaction: Combining High-touch and High-tech in Financial Advisory Encounters. Proceedings of the ACM on Human-Computer Interaction 3, CSCW, Article 224 (2019)." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 53, + 501, + 295, + 517 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 501, + 295, + 517 + ], + "spans": [ + { + "bbox": [ + 53, + 501, + 295, + 517 + ], + "type": "text", + "content": "[10] Eugene F Fama and Kenneth R French. 1998. Value versus growth: The international evidence. The journal of finance 53, 6 (1998), 1975-1999." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 53, + 517, + 294, + 548 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 517, + 294, + 548 + ], + "spans": [ + { + "bbox": [ + 53, + 517, + 294, + 548 + ], + "type": "text", + "content": "[11] Christian Hildebrand and Anouk Bergner. 2021. Conversational robo advisors as surrogates of trust: onboarding experience, firm perception, and consumer financial decision making. Journal of the Academy of Marketing Science 49, 4 (2021), 659-676." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 53, + 548, + 294, + 565 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 548, + 294, + 565 + ], + "spans": [ + { + "bbox": [ + 53, + 548, + 294, + 565 + ], + "type": "text", + "content": "[12] Dietmar Jannach, Ahtsham Manzoor, Wanling Cai, and Li Chen. 2021. A survey on conversational recommender systems. Comput. Surveys 54, 5 (2021), 1-36." 
+ } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 53, + 565, + 294, + 597 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 565, + 294, + 597 + ], + "spans": [ + { + "bbox": [ + 53, + 565, + 294, + 597 + ], + "type": "text", + "content": "[13] Guangyuan Jiang, Manjie Xu, Song-Chun Zhu, Wenjuan Han, Chi Zhang, and Yixin Zhu. 2024. Evaluating and inducing personality in pre-trained language models. In Proceedings of the 37th Conference on Neural Information Processing Systems (NeurIPS 2023)." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 53, + 597, + 294, + 628 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 597, + 294, + 628 + ], + "spans": [ + { + "bbox": [ + 53, + 597, + 294, + 628 + ], + "type": "text", + "content": "[14] Hang Jiang, Xiajie Zhang, Xubo Cao, Cynthia Breazeal, Deb Roy, and Jad Kabbara. 2024. PersonalLLM: Investigating the Ability of Large Language Models to Express Personality Traits. In Findings of the Association for Computational Linguistics: NAACL 2024. 3605-3627." + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 53, + 628, + 294, + 661 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 628, + 294, + 661 + ], + "spans": [ + { + "bbox": [ + 53, + 628, + 294, + 661 + ], + "type": "text", + "content": "[15] Francis M. Kinniry Jr., Colleen M. Jaconetti, Michael A. DijJoseph, Yan Zilbering, Donald G. Bennyhoff, and Georgina Yarwood. 2020. Putting a value on your value: Quantifying Vanguard Adviser's Alpha in the UK. Technical Report. The Vanguard Group, Valley Forge, Pennsylvania, USA." + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 53, + 661, + 294, + 685 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 661, + 294, + 685 + ], + "spans": [ + { + "bbox": [ + 53, + 661, + 294, + 685 + ], + "type": "text", + "content": "[16] Sherrie Y.X. Komiak and Izak Benbasat. 2006. 
The effects of personalization and familiarity on trust and adoption of recommendation agents. MIS quarterly (2006), 941-960." + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 53, + 685, + 294, + 709 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 685, + 294, + 709 + ], + "spans": [ + { + "bbox": [ + 53, + 685, + 294, + 709 + ], + "type": "text", + "content": "[17] Ivica Kostric, Krisztian Balog, and Filip Radlinski. 2021. Soliciting user preferences in conversational recommender systems via usage-related questions. In Proceedings of the 15th ACM Conference on Recommender Systems. 724-729." + } + ] + } + ], + "index": 20 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 316, + 86, + 559, + 709 + ], + "type": "list", + "angle": 0, + "index": 45, + "blocks": [ + { + "bbox": [ + 317, + 86, + 559, + 118 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 86, + 559, + 118 + ], + "spans": [ + { + "bbox": [ + 317, + 86, + 559, + 118 + ], + "type": "text", + "content": "[18] Kausik Lakkaraju, Sara E. Jones, Sai Krishna Revanth Vuruma, Vishal Pallagani, Bharath C. Muppasani, and Biplav Srivastava. 2023. LLMs for Financial Advise-ment: A Fairness and Efficacy Study in Personal Decision Making. In Proceedings of the 4th ACM Conference on AI in Finance (ICAIF 2023). 100-107." + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 317, + 118, + 559, + 142 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 118, + 559, + 142 + ], + "spans": [ + { + "bbox": [ + 317, + 118, + 559, + 142 + ], + "type": "text", + "content": "[19] Cong Li. 2016. When does web-based personalization really work? The distinction between actual personalization and perceived personalization. Computers in human behavior 54 (2016), 25-33." 
+ } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 317, + 142, + 559, + 174 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 142, + 559, + 174 + ], + "spans": [ + { + "bbox": [ + 317, + 142, + 559, + 174 + ], + "type": "text", + "content": "[20] Zhuoyan Li, Zhuoran Lu, and Ming Yin. 2023. Modeling human trust and reliance in AI-assisted decision making: a markovian approach. In Proceedings of the 37th AAAI Conference on Artificial Intelligence (AAAI 2023/IAAI 2023/EAAI 2023). Article 679." + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 317, + 175, + 559, + 198 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 175, + 559, + 198 + ], + "spans": [ + { + "bbox": [ + 317, + 175, + 559, + 198 + ], + "type": "text", + "content": "[21] Andrew W. Lo and Jillian Ross. 2024. Can ChatGPT Plan Your Retirement?: Generative AI and Financial Advice. Harvard Data Science Review (2024). Issue Special Issue 5." + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 316, + 198, + 559, + 214 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 198, + 559, + 214 + ], + "spans": [ + { + "bbox": [ + 316, + 198, + 559, + 214 + ], + "type": "text", + "content": "[22] Tim Loughran and Bill McDonald. 2011. When is a liability not a liability? Textual analysis, dictionaries, and 10-Ks. The Journal of finance 66, 1 (2011), 35-65." + } + ] + } + ], + "index": 26 + }, + { + "bbox": [ + 316, + 214, + 559, + 230 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 214, + 559, + 230 + ], + "spans": [ + { + "bbox": [ + 316, + 214, + 559, + 230 + ], + "type": "text", + "content": "[23] Robert R. McCrae and Oliver P. John. 1992. An introduction to the five-factor model and its applications. Journal of personality 60 2 (1992), 175-215." 
+ } + ] + } + ], + "index": 27 + }, + { + "bbox": [ + 317, + 230, + 559, + 262 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 230, + 559, + 262 + ], + "spans": [ + { + "bbox": [ + 317, + 230, + 559, + 262 + ], + "type": "text", + "content": "[24] Sourav Medya, Mohammad Rasoolinejad, Yang Yang, and Brian Uzzi. 2022. An Exploratory Study of Stock Price Movements from Earnings Calls. In Companion Proceedings of the Web Conference 2022 (WWW 2022). Association for Computing Machinery, 20-31." + } + ] + } + ], + "index": 28 + }, + { + "bbox": [ + 316, + 262, + 559, + 285 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 262, + 559, + 285 + ], + "spans": [ + { + "bbox": [ + 316, + 262, + 559, + 285 + ], + "type": "text", + "content": "[25] Pearl Pu, Li Chen, and Rong Hu. 2011. A user-centric evaluation framework for recommender systems. In Proceedings of the 5th ACM conference on Recommender Systems (RecSys 2011). 157-164." + } + ] + } + ], + "index": 29 + }, + { + "bbox": [ + 316, + 285, + 559, + 318 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 285, + 559, + 318 + ], + "spans": [ + { + "bbox": [ + 316, + 285, + 559, + 318 + ], + "type": "text", + "content": "[26] Filip Radlinski, Krisztian Balog, Bill Byrne, and Karthik Krishnamoorthi. 2019. Coached conversational preference elicitation: A case study in understanding movie preferences. In Proceedings of the 20th Annual SIGdial Meeting on Discourse and Dialogue (SIGDIAL 2019). 353-360." + } + ] + } + ], + "index": 30 + }, + { + "bbox": [ + 316, + 318, + 559, + 342 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 318, + 559, + 342 + ], + "spans": [ + { + "bbox": [ + 316, + 318, + 559, + 342 + ], + "type": "text", + "content": "[27] Filip Radlinski and Nick Craswell. 2017. A theoretical framework for conversational search. 
In Proceedings of the 2nd Conference on Human Information Interaction and Retrieval (CHIIR 2017). 117-126." + } + ] + } + ], + "index": 31 + }, + { + "bbox": [ + 316, + 342, + 559, + 382 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 342, + 559, + 382 + ], + "spans": [ + { + "bbox": [ + 316, + 342, + 559, + 382 + ], + "type": "text", + "content": "[28] Oscar Sainz, Jon Campos, Iker Garcia-Ferrero, Julien Etxaniz, Oier Lopez de Lacalle, and Eneko Agirre. 2023. NLP Evaluation in trouble: On the Need to Measure LLM Data Contamination for each Benchmark. In Findings of the Association for Computational Linguistics: EMNLP 2023, Houda Bouamor, Juan Pino, and Kalika Bali (Eds.). Association for Computational Linguistics, 10776-10787." + } + ] + } + ], + "index": 32 + }, + { + "bbox": [ + 316, + 382, + 559, + 397 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 382, + 559, + 397 + ], + "spans": [ + { + "bbox": [ + 316, + 382, + 559, + 397 + ], + "type": "text", + "content": "[29] Tetsuya Sakai. 2018. Laboratory experiments in information retrieval. The information retrieval series 40 (2018), 4." + } + ] + } + ], + "index": 33 + }, + { + "bbox": [ + 317, + 398, + 559, + 430 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 398, + 559, + 430 + ], + "spans": [ + { + "bbox": [ + 317, + 398, + 559, + 430 + ], + "type": "text", + "content": "[30] Javier Sanz-Cruzado, Edward Richards, and Richard McCreadie. 2024. FAR-AI: A Modular Platform for Investment Recommendation in the Financial Domain. In Proceedings of the 46th European Conference on Information Retrieval (ECIR 2024), Part V. Springer-Verlag, Glasgow, United Kingdom, 267-271." 
+ } + ] + } + ], + "index": 34 + }, + { + "bbox": [ + 317, + 430, + 559, + 461 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 430, + 559, + 461 + ], + "spans": [ + { + "bbox": [ + 317, + 430, + 559, + 461 + ], + "type": "text", + "content": "[31] Yunfan Shao, Linyang Li, Junqi Dai, and Xipeng Qiu. 2023. Character-LLM: A Trainable Agent for Role-Playing. In Proceedings of the 2023 Conference on Empirical Methods in Natural Language Processing (EMNLP 2023). Association for Computational Linguistics, 13153-13187." + } + ] + } + ], + "index": 35 + }, + { + "bbox": [ + 317, + 461, + 559, + 485 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 461, + 559, + 485 + ], + "spans": [ + { + "bbox": [ + 317, + 461, + 559, + 485 + ], + "type": "text", + "content": "[32] Tuva Lunde Smestad and Frode Volden. 2019. Chatbot personalities matters: improving the user experience of chatbot interfaces. In 5th International Conference Internet Science: (INSCI 2018). Springer, 170-181." + } + ] + } + ], + "index": 36 + }, + { + "bbox": [ + 317, + 485, + 559, + 501 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 485, + 559, + 501 + ], + "spans": [ + { + "bbox": [ + 317, + 485, + 559, + 501 + ], + "type": "text", + "content": "[33] David J Streich. 2023. Risk preference elicitation and financial advice taking. Journal of Behavioral Finance 24, 3 (2023), 259-275." + } + ] + } + ], + "index": 37 + }, + { + "bbox": [ + 317, + 501, + 559, + 525 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 501, + 559, + 525 + ], + "spans": [ + { + "bbox": [ + 317, + 501, + 559, + 525 + ], + "type": "text", + "content": "[34] Yueming Sun and Yi Zhang. 2018. Conversational recommender system. In Proceedings of the 41th International ACM SIGIR Conference on Research and Development in Information Retrieval (SIGIR 2018), 235-244." 
+ } + ] + } + ], + "index": 38 + }, + { + "bbox": [ + 317, + 525, + 559, + 565 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 525, + 559, + 565 + ], + "spans": [ + { + "bbox": [ + 317, + 525, + 559, + 565 + ], + "type": "text", + "content": "[35] Takehiro Takayanagi, Kiyoshi Izumi, Atsuo Kato, Naoyuki Tsunedomi, and Yukina Abe. 2023. Personalized Stock Recommendation with Investors' Attention and Contextual Information. In Proceedings of the 46th International ACM SIGIR Conference on Research and Development in Information Retrieval (SIGIR 2023). Association for Computing Machinery, 3339-3343." + } + ] + } + ], + "index": 39 + }, + { + "bbox": [ + 317, + 565, + 559, + 605 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 565, + 559, + 605 + ], + "spans": [ + { + "bbox": [ + 317, + 565, + 559, + 605 + ], + "type": "text", + "content": "[36] Johanne R. Trippas, Sara Fahad Dawood Al Lawati, Joel Mackenzie, and Luke Gallagher. 2024. What do Users Really Ask Large Language Models? An Initial Log Analysis of Google Bard Interactions in the Wild. In Proceedings of the 47th International ACM SIGIR Conference on Research and Development in Information Retrieval (SIGIR 2024). 2703-2707." + } + ] + } + ], + "index": 40 + }, + { + "bbox": [ + 317, + 605, + 559, + 644 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 605, + 559, + 644 + ], + "spans": [ + { + "bbox": [ + 317, + 605, + 559, + 644 + ], + "type": "text", + "content": "[37] Johanne R. Trippas, Luke Gallagher, and Joel Mackenzie. 2024. Re-evaluating the Command-and-Control Paradigm in Conversational Search Interactions. In Proceedings of the 33rd ACM International Conference on Information and Knowledge Management (CIKM 2024). Association for Computing Machinery, 2260-2270." 
+ } + ] + } + ], + "index": 41 + }, + { + "bbox": [ + 317, + 644, + 559, + 669 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 644, + 559, + 669 + ], + "spans": [ + { + "bbox": [ + 317, + 644, + 559, + 669 + ], + "type": "text", + "content": "[38] Patchara Vanichvasin. 2021. Chatbot Development as a Digital Learning Tool to Increase Students' Research Knowledge. International Education Studies 14, 2 (2021), 44-53." + } + ] + } + ], + "index": 42 + }, + { + "bbox": [ + 317, + 669, + 559, + 692 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 669, + 559, + 692 + ], + "spans": [ + { + "bbox": [ + 317, + 669, + 559, + 692 + ], + "type": "text", + "content": "[39] Xuena Wang, Xueting Li, Zi Yin, Yue Wu, and Jia Liu. 2023. Emotional intelligence of large language models. Journal of Pacific Rim Psychology 17 (2023), 18344909231213958." + } + ] + } + ], + "index": 43 + }, + { + "bbox": [ + 316, + 692, + 559, + 709 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 692, + 559, + 709 + ], + "spans": [ + { + "bbox": [ + 316, + 692, + 559, + 709 + ], + "type": "text", + "content": "[40] Pontus Wärnestäl. 2005. User evaluation of a conversational recommender system. In Proceedings of the 4th Workshop on Knowledge and Reasoning in Practical" + } + ] + } + ], + "index": 44 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 52, + 60, + 173, + 69 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 60, + 173, + 69 + ], + "spans": [ + { + "bbox": [ + 52, + 60, + 173, + 69 + ], + "type": "text", + "content": "SIGIR 2025, July 13-18, 2018, Padua, Italy" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 507, + 60, + 558, + 69 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 507, + 60, + 558, + 69 + ], + "spans": [ + { + "bbox": [ + 507, + 60, + 558, + 69 + ], + "type": "text", + "content": "Takayanagi et al." 
+ } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 9 + }, + { + "para_blocks": [ + { + "bbox": [ + 52, + 86, + 296, + 175 + ], + "type": "list", + "angle": 0, + "index": 6, + "blocks": [ + { + "bbox": [ + 67, + 86, + 119, + 95 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 86, + 119, + 95 + ], + "spans": [ + { + "bbox": [ + 67, + 86, + 119, + 95 + ], + "type": "text", + "content": "Dialogue Systems." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 52, + 95, + 296, + 118 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 95, + 296, + 118 + ], + "spans": [ + { + "bbox": [ + 52, + 95, + 296, + 118 + ], + "type": "text", + "content": "[41] Hamed Zamani, Johanne R Trippas, Jeff Dalton, Filip Radlinski, et al. 2023. Conversational information seeking. Foundations and Trends in Information Retrieval 17, 3-4 (2023), 244-456." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 52, + 118, + 296, + 143 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 118, + 296, + 143 + ], + "spans": [ + { + "bbox": [ + 52, + 118, + 296, + 143 + ], + "type": "text", + "content": "[42] Markus Zanker, Laurens Rook, and Dietmar Jannach. 2019. Measuring the impact of online personalisation: Past, present and future. International Journal of Human-Computer Studies 131 (2019), 160–168." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 52, + 143, + 296, + 175 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 143, + 296, + 175 + ], + "spans": [ + { + "bbox": [ + 52, + 143, + 296, + 175 + ], + "type": "text", + "content": "[43] Yongfeng Zhang, Xu Chen, Qingyao Ai, Liu Yang, and W Bruce Croft. 2018. Towards conversational search and recommendation: System ask, user respond. In Proceedings of the 27th ACM International Conference on Information and Knowledge Management (CIKM 2018). 177-186." 
+ } + ] + } + ], + "index": 5 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 316, + 86, + 559, + 167 + ], + "type": "list", + "angle": 0, + "index": 10, + "blocks": [ + { + "bbox": [ + 317, + 86, + 559, + 118 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 86, + 559, + 118 + ], + "spans": [ + { + "bbox": [ + 317, + 86, + 559, + 118 + ], + "type": "text", + "content": "[44] Huaqin Zhao, Zhengliang Liu, Zihao Wu, Yiwei Li, Tianze Yang, Peng Shu, Shaochen Xu, Haixing Dai, Lin Zhao, Gengchen Mai, et al. 2024. Revolutionizing Finance with LLMs: An Overview of Applications and Insights. arXiv preprint arXiv:2401.11641 (2024)." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 317, + 118, + 559, + 143 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 118, + 559, + 143 + ], + "spans": [ + { + "bbox": [ + 317, + 118, + 559, + 143 + ], + "type": "text", + "content": "[45] Dávid Zibriczky. 2016. Recommender systems meet finance: a literature review. In Proceedings of the 2nd International Workshop on Personalization & Recommender Systems in Financial Services (FinRec 2016). 1-10." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 316, + 143, + 559, + 167 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 143, + 559, + 167 + ], + "spans": [ + { + "bbox": [ + 316, + 143, + 559, + 167 + ], + "type": "text", + "content": "[46] Liv Ziegfeld, Daan Di Scala, and Anita HM Cremers. 2025. The effect of preference elicitation methods on the user experience in conversational recommender systems. Computer Speech & Language 89 (2025), 101696." 
+ } + ] + } + ], + "index": 9 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 51, + 60, + 248, + 68 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 60, + 248, + 68 + ], + "spans": [ + { + "bbox": [ + 51, + 60, + 248, + 68 + ], + "type": "text", + "content": "Are Generative AI Agents Effective Personalized Financial Advisors?" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 438, + 60, + 558, + 69 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 438, + 60, + 558, + 69 + ], + "spans": [ + { + "bbox": [ + 438, + 60, + 558, + 69 + ], + "type": "text", + "content": "SIGIR 2025, July 13-18, 2018, Padua, Italy" + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 10 + } + ], + "_backend": "vlm", + "_version_name": "2.6.4" +} \ No newline at end of file diff --git a/data/2025/2504_05xxx/2504.05888/a802a693-b787-42ea-9506-1d20b2ad6f02_content_list.json b/data/2025/2504_05xxx/2504.05888/a802a693-b787-42ea-9506-1d20b2ad6f02_content_list.json new file mode 100644 index 0000000000000000000000000000000000000000..b13e1b7d7c74375d728b2c1faa294134dd56509c --- /dev/null +++ b/data/2025/2504_05xxx/2504.05888/a802a693-b787-42ea-9506-1d20b2ad6f02_content_list.json @@ -0,0 +1,572 @@ +[ + { + "type": "text", + "text": "UVG-VPC: Voxelized Point Cloud Dataset for Visual Volumetric Video-based Coding", + "text_level": 1, + "bbox": [ + 147, + 32, + 848, + 98 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Guillaume Gautier, Alexandre Mercat, Louis Fréneau, Mikko Pitkänen, and Jarno Vanne \nUltra Video Group, Tampere University, Tampere, Finland", + "bbox": [ + 223, + 112, + 774, + 137 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "{guillaume.gautier, alexandre.mercat, louis.freneau, mikko.pitkanen, jarno.vanne} @tuni.fi", + "bbox": [ + 221, + 137, + 774, + 149 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Abstract- Point cloud 
compression has become a crucial factor in immersive visual media processing and streaming. This paper presents a new open dataset called UVG-VPC for the development, evaluation, and validation of MPEG Visual Volumetric Video-based Coding (V3C) technology. The dataset is distributed under its own non-commercial license. It consists of 12 point cloud test video sequences of diverse characteristics with respect to the motion, RGB texture, 3D geometry, and surface occlusion of the points. Each sequence is 10 seconds long and comprises 250 frames captured at 25 frames per second. The sequences are voxelized with a geometry precision of 9 to 12 bits, and the voxel color attributes are represented as 8-bit RGB values. The dataset also includes associated normals that make it more suitable for evaluating point cloud compression solutions. The main objective of releasing the UVG-VPC dataset is to foster the development of V3C technologies and thereby shape the future in this field.", + "bbox": [ + 68, + 167, + 485, + 376 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Keywords—Open dataset, point cloud, Visual Volumetric Video-based Coding (V3C), Video-based Point Cloud Compression (V-PCC), Extended Reality (XR)", + "bbox": [ + 70, + 388, + 485, + 426 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "I. INTRODUCTION", + "text_level": 1, + "bbox": [ + 203, + 436, + 341, + 448 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Recent advances in volumetric visual media technologies have opened a plethora of opportunities for Extended Reality (XR). The state-of-the-art volumetric sensing and capturing technologies allow for the creation of detailed and immersive digital representations of the real world in three-dimensional (3D) space. In general, these representations can be represented as polygon meshes or point clouds that provide a realistic and detailed view of scenes from any viewpoint. 
Moreover, the natural and realistic viewing experience in XR is enhanced by 6 degrees of freedom (6DoF), which enables viewers to move around in the scene with both translational and rotational freedom and thereby expand the viewing space.", + "bbox": [ + 68, + 451, + 485, + 609 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Economic storage and transmission of volumetric visual data require efficient compression technologies. To that end, the Motion Picture Experts Group (MPEG) has released the Visual Volumetric Video-based Coding (V3C) standards ISO/IEC 23090-5 [1] to compress dynamic volumetric scenes for XR applications, including gaming, sports broadcasting, and motion pictures. V3C can be used to compress various types of volumetric content, such as point clouds, immersive video with depth, and mesh representations of visual volumetric frames. For the time being, V3C includes two standards: Video-based Point Cloud Compression (V-PCC) [2], and MPEG immersive video (MIV) [3], [4], of which this paper focuses on the V-PCC standard.", + "bbox": [ + 68, + 615, + 485, + 785 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Rate-distortion (RD) performance of video codes is typically evaluated with objective and subjective metrics, which involves a trade-off between coding efficiency and loss of information. 
Conducting these quality assessments comprehensively calls for representative datasets that cover a broad range of content (e.g., motion, texture, or occlusion).", + "bbox": [ + 68, + 791, + 485, + 872 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Given that V-PCC is commonly used for telecommunication and XR applications [4], the test set should be composed of voxelized point cloud full-body human subjects, as shown by the Common Test Conditions (CTC) for V-PCC [5].", + "bbox": [ + 507, + 167, + 924, + 219 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Table I lists the existing open point cloud datasets of full human body. The first two of them are not voxelized [6], [7], whereas the remaining three have limitations in geometry precision and size [8]–[10]. Hence, none of them is optimal for the development and evaluation of V-PCC tools, so there is an urgent need for high-quality datasets that contain real-world scenes with multifaceted content and motion.", + "bbox": [ + 507, + 225, + 926, + 318 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "This paper presents a new open dataset called UVG-VPC that is made up of 12 voxelized point cloud test sequences. Each sequence is $10\\mathrm{s}$ in length, comprises 250 frames captured at a frame rate of 25 frame per second (fps), and has RGB attribute precision of 8 bits and a geometry precision of 9, 10, 11, and 12 bits. The dataset is available online at", + "bbox": [ + 507, + 324, + 926, + 403 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "https://ultravideo.fi/UVG-VPC/", + "bbox": [ + 606, + 406, + 826, + 420 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "It is released under its own non-commercial license [11]. Additionally, the associated normals are provided for all sequences. 
To the best of our knowledge, the proposed dataset is the first and only one that has been entirely designed for the development, evaluation, and validation of V-PCC coding technologies. The UVG-VPC dataset seeks to serve as a valuable resource for researchers and practitioners in the field of volumetric data compression and beyond.", + "bbox": [ + 507, + 426, + 924, + 531 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "The remainder of the paper is outlined as follows. Section II describes the volumetric capture studio setup used to obtain the needed data. Section III details the proposed workflow for voxelized point cloud generation. Section IV introduces our UVG-VPC dataset and discusses its characteristics. Finally, Section V concludes the paper.", + "bbox": [ + 507, + 537, + 924, + 615 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "II. VOLUMETRIC CAPTURE STUDIO", + "text_level": 1, + "bbox": [ + 583, + 625, + 835, + 638 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "The proposed dataset was captured with the volumetric capture studio developed by Mantis Vision [12]. Fig. 1 illustrates the studio setup that is composed of 32 (19 long and 13 short) camera units with different stereo distances. Each camera unit is composed of two RGB cameras, an IR projector, an IR (infrared) camera, and an Intel Next Unit of", + "bbox": [ + 507, + 642, + 926, + 722 + ], + "page_idx": 0 + }, + { + "type": "table", + "img_path": "images/949f10347b71bcf7a724678c83278f617abc44cc6e4d50267a87348b633002f1.jpg", + "table_caption": [ + "TABLE I. EXISTING OPEN POINT CLOUD DATASETS OF FULL HUMAN BODY" + ], + "table_footnote": [ + "*Partially included in the CTC for V-PCC [5]." + ], + "table_body": "
RefDataset#seq.Fps#frames#camsVoxelized/ geometry precision
[6]CWIPC-SXR2130596–27687No
[7]Volograms & V-SENSE330149–183012/60No
[8]8iVFBv2*43030042Yes/10bits
[9]Owlii*430600-Yes/11bits
[10]8iVSLF7301, 30039Yes/12bits
OurUVG-VPC122525096Yes/9–12bits
", + "bbox": [ + 515, + 769, + 927, + 887 + ], + "page_idx": 0 + }, + { + "type": "page_footnote", + "text": "This work was supported in part by the XR Simulation and Presence at the Cloud Edge (XR-SPACE) project led by Nokia and funded by Business Finland, and the Academy of Finland (decision no. 349216).", + "bbox": [ + 68, + 883, + 482, + 913 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/f242914588f033803c2e5584c7b701b20ec0e783d3b3edd51333f51b6f080b0a.jpg", + "image_caption": [ + "Fig. 1. Volumetric capture setup." + ], + "image_footnote": [], + "bbox": [ + 152, + 61, + 423, + 210 + ], + "page_idx": 1 + }, + { + "type": "table", + "img_path": "images/75688d5d77774870bb82790d226ffd56321eb2120c7a0cf4564f1f6e2d72b4a4.jpg", + "table_caption": [ + "TABLE II. SPECIFICATION OF SHORT AND LONG CAMERA UNITS" + ], + "table_footnote": [], + "table_body": "
Camera unit typeLong (×19)Short (×13)
RGB camera ×2SpecificationUI-328xCP-CUI-308xCP-C
Resolution2456×20542456×2054
Stereo distance~30cm~10cm
IR cameraSpecificationUI-314xCP-M
Resolution640×512
Intel NUCProcessorIntel(R) Core(TM) i7-8665U CPU @ 1.90GHz
Memory32 GB
Hard driveSamsung 970 EVO Plus SSD 1Tb
", + "bbox": [ + 73, + 250, + 482, + 370 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Computing (NUC); their specifications are given in Table II. The camera units are connected through a tree topology using 10Gbps switches, with four camera units connected to each switch, and two switches connected to the render computer. The studio is able to capture volumetric video at up to 25 fps.", + "bbox": [ + 70, + 378, + 485, + 445 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "The studio features 40 LED tubes of $50\\mathrm{W}$ and a Sync LED that flashes and triggers at the recording frame rate. Cameras capture images at slightly different times to avoid IR interference, with opposite cameras exposing at the same time. The studio is set up to a height of $2.5\\mathrm{m}$ , with a diameter of $3\\mathrm{m}$ , and it allows scanning of a scene with a height of $2.2\\mathrm{m}$ and a diameter of $1.6\\mathrm{m}$ . To enhance the capture quality of faces, most cameras are located on the upper part of the body.", + "bbox": [ + 70, + 450, + 485, + 556 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "III. VOXELIZED POINT CLOUD GENERATION", + "text_level": 1, + "bbox": [ + 110, + 564, + 423, + 577 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Fig. 2 depicts the five steps needed to generate the proposed voxelized point cloud dataset. The first two steps are processed by the off-the-shelf equipment of the volumetric capture studio. The remaining three steps are designed in this work to make our sequences matching the format of the sequences from CTC for V-PCC [5].", + "bbox": [ + 70, + 582, + 485, + 661 + ], + "page_idx": 1 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "1) Point cloud acquisition is executed by the camera units and the render computer. The camera units capture both RGB and IR data, which are fused by the NUCs into a point cloud structure. 
Both camera data and generated point clouds are sent over the network to the render computer that merges them into a single Raw Merged Point Cloud.", + "2) Mesh generation is used to create a Mesh from the Raw Merged Point Cloud with off-the-shelf Poisson surface reconstruction algorithm provided with the volumetric capture studio.", + "3) Mesh sampling deploys triangle point picking from the trimesh Python library [13] to generate a Sampled Mesh Point Cloud. After studying the trade-off between having a sufficient number of points for the following voxelization process and a reasonable memory footprint, the number of sampled points was fixed to 10 million." + ], + "bbox": [ + 70, + 667, + 485, + 896 + ], + "page_idx": 1 + }, + { + "type": "image", + "img_path": "images/5024f55b9e1fd437fb2821dc70826e646043da398eed2e51b77c0ff590cd39a5.jpg", + "image_caption": [ + "Fig. 2. Overview of the UVG-VPC dataset generation process." + ], + "image_footnote": [], + "bbox": [ + 512, + 61, + 915, + 200 + ], + "page_idx": 1 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "4) Voxelization is the process of applying point cloud data on a regular 3D grid structure, where each cell or voxel represents the presence or absence of points within its boundaries. When multiple points are involved, color attributes are averaged. For this step, the voxel size is computed as the maximum dimension of capture system bounding box across all dimensions divided by $2^{N}$ , where $N$ is the geometry precision. The UVG-VPC dataset includes Voxelized Point Clouds with a geometry precision of 9, 10, 11, and 12 bits.", + "5) Normal generation computes the Normals for each Voxelized Point Cloud in the UVG-VPC dataset using open3D Python library [14] and a Knn normal estimation with 12 neighbours [15], [16]. These Normals are used in the CTC for V-PCC to calculate the quality metric known as D2 [5]. Providing the Normals enables fair comparisons between solutions." 
+ ], + "bbox": [ + 509, + 222, + 924, + 445 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "In addition to the UVG-VPC dataset, we provide open access to the intermediate data used to create the sequences. Scientific community is free to use it in voxelized point cloud generation with varying geometry precision, as well as in other areas of interest such as mesh generation or dynamic mesh compression [17].", + "bbox": [ + 507, + 450, + 924, + 529 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "IV. UVG-VPC DATASET", + "text_level": 1, + "bbox": [ + 611, + 538, + 800, + 551 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "The proposed UVG-VPC dataset consists of 12 sequences, each $10\\mathrm{~s}$ long and composed of 250 frames captured at 25 fps. For each sequence, point cloud voxelized at 9, 10, 11, and 12 bits are provided with their associated normals.", + "bbox": [ + 507, + 556, + 924, + 609 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Table III lists the UVG-VPC sequences alphabetically and characterizes them with snapshots, names, content descriptions, and specific features. There is also a graph for each sequence that shows the distribution of points per frame for a geometry precision of 10 bits, as well as the corresponding average, minimum, and maximum values.", + "bbox": [ + 507, + 614, + 924, + 694 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "The features of the sequences were carefully selected to make them challenging for various compression algorithms and ensure that the dataset is representative of real-world scenarios. 
In particular, the characterisation was done with respect to the following features:", + "bbox": [ + 507, + 700, + 924, + 766 + ], + "page_idx": 1 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- speed: speed of moving points;", + "- motion field: quantity of moving points;", + "- RGB texture: texture complexity of the RGB attributes;", + "- 3D geometry: complexity of the volumetric shapes; and", + "- surface occlusion: number of (dis)appearing points." + ], + "bbox": [ + 510, + 770, + 899, + 853 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Table IV summarizes the characteristics of the UVG-VPC sequences. They all have unique characteristics, i.e., no two sequences sharing the same set of features. Furthermore, some", + "bbox": [ + 507, + 854, + 924, + 896 + ], + "page_idx": 1 + }, + { + "type": "table", + "img_path": "images/8bcf6a46d1ef70ca523d9e20c79aa14744e9c04566f6fdf3b294a8d3447a3d42.jpg", + "table_caption": [ + "TABLE III. CHARACTERISTICS OF THE VOXELIZED POINT CLOUD SEQUENCES IN THE PROPOSED UVG-VPC DATASET" + ], + "table_footnote": [], + "table_body": "
SnapshotName and description#pts/frame (10-bits)SnapshotName and description#pts/frame (10-bits)
Name: Blue Backpack\nDescription: a person takes a jacket out of a backpack and puts it on.\nSpecific features: unicolor clothes and accessories; the interaction with the accessories adds 3D geometry complexity and surface occlusions.#PTS\n1.4 #10^5\n1.2\n0.8\n0.5\nAverage: 1 051 399\nMin: 799 322\nMax: 1 302 904Name: BlueSpin\nDescription: a person dressed in blue is steadily spinning around.\nSpecific features: unicolor clothes; steady rotation about a fixed axis.7.5 #10^5\n7.7\n6.5\n0\nFrame ID\nAverage: 685 044\nMin: 679 347\nMax: 693 684
Name: BlueSquat\nDescription: a person dressed in blue is performing squats.\nSpecific features: unicolor clothes; intermittent surface occlusions due to body movements.#PTS\n7.5 #10^5\n7.7\n6.5\n0\nAverage: 718 167\nMin: 667 024\nMax: 741 177Name: CasualSpin\nDescription: a person wearing a striped shirt and jeans is steadily spinning around.\nSpecific features: textured top, unicolor bottom; steady rotation about a fixed axis.#PTS\n6.2\n5.8\n0\nFrame ID\nAverage: 599 669\nMin: 588 394\nMax: 619 662
Name: CasualSquat\nDescription: a person wearing a striped shirt and jeans is performing squats.\nSpecific features: textured top; unicolor bottom; intermittent surface occlusions due to body movements.#PTS\n6.2\n5.8\n0\nAverage: 614 419\nMin: 602 150\nMax: 629 416Name: ElegantDance\nDescription: a person wearing a long black dress is dancing and twirling around.\nSpecific features: unicolor clothes; as the person moves and twirls, the dress is folding and shifting, creating a complex 3D geometry.#PTS\n10 #10^5\n10\n0\n0\n0\nAverage: 771 714\nMin: 579 917\nMax: 1 098 016
Name: ElegantWave\nDescription: a person wearing a long black dress greets by waving hand.\nSpecific features: unicolor clothes; only upper-body movement; no dress movement; simple 3D geometry.#PTS\n10 #10^5\n5\n0\nAverage: 661 405\nMin: 650 731\nMax: 674 383Name: FlowerDance\nDescription: a person wearing a long flower dress is dancing and twirling around.\nSpecific features: textured clothes; as the person moves and twirls, the dress is folding and shifting, creating a complex 3D geometry.#PTS\n10 #10^5\n10\n0\n0\n0\nAverage: 755 299\nMin: 650 961\nMax: 913 621
Name: FlowerWave\nDescription: a person wearing a long flower dress greets by waving hand.\nSpecific features: textured clothes; only upper-body movement; no dress movement; simple 3D geometry.#PTS\n10 #10^5\n6\n0\nAverage: 691 334\nMin: 659 972\nMax: 708 898Name: Gymnast\nDescription: a person stands on one leg and does a leg hold.\nSpecific features: unicolor clothes; leg movement creates surface occlusion.#PTS\n5.5 #10^5\n5\n0\n0\n0\nAverage: 523 078\nMin: 509 576\nMax: 551 604
Name: HelloGoodbye\nDescription: a person wearing a long black dress enters the scene, greets by waving hand, and leaves the scene.\nSpecific features: unicolor clothes; empty capture space at sequence start and end.#PTS\n10 #10^5\n5\n0\nAverage: 639 807\nMin: 133 276\nMax: 929 588Name: ReadyForWinter\nDescription: a person puts on a beanie and a scarf.\nSpecific features: unicolor clothes; textured scarf; interaction with accessories creates complex surface structures and surface occlusions.#PTS\n7.0 #10^5\n7.0\n7.0\n0\nFrame ID\nAverage: 794 317\nMin: 730 913\nMax: 883 642
", + "bbox": [ + 77, + 74, + 919, + 653 + ], + "page_idx": 2 + }, + { + "type": "table", + "img_path": "images/5be5397b2dddb8bf27fafc0d3ae7ba60ba325d54a8206b360cdde882b0effc85.jpg", + "table_caption": [ + "TABLE IV. UVG-VPC DATASET CHARACTERIZATION" + ], + "table_footnote": [], + "table_body": "
SequenceSpeedMotion fieldRGB texture3D geometrySurface occlusion
BlueBackpackFastMediumSimpleComplexPlenty of
BlueSpinMediumDenseSimpleSimpleLittle
BlueSquatFastDenseSimpleMediumMedium
CasualSpinMediumDenseMediumSimpleLittle
CasualSquatFastDenseMediumMediumMedium
ElegantDanceFastDenseSimpleComplexPlenty of
ElegantWaveSlowSparseSimpleSimpleLittle
FlowerDanceFastDenseComplexComplexPlenty of
FlowerWaveSlowSparseComplexSimpleLittle
GymnastMediumMediumSimpleSimpleMedium
HelloGoodbyeMediumMediumSimpleMediumPlenty of
ReadyForWinterMediumMediumMediumComplexPlenty of
", + "bbox": [ + 73, + 675, + 487, + 841 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "sequences were specifically designed to contrast with each other in terms of one or more of these criteria.", + "bbox": [ + 70, + 847, + 485, + 872 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "V. CONCLUSION", + "text_level": 1, + "bbox": [ + 645, + 661, + 773, + 671 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "This paper presented the UVG-VPC open dataset, which has been carefully designed to facilitate the development, evaluation, and validation of V-PCC coding technology. The dataset consists of 12 voxelized point cloud sequences and associated normals. We believe that the availability of the UVG-VPC dataset will enable researchers and practitioners to advance the state-of-the-art in point cloud compression and foster its deployment in immersive visual media applications.", + "bbox": [ + 507, + 678, + 924, + 784 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "ACKNOWLEDGMENT", + "text_level": 1, + "bbox": [ + 647, + 794, + 788, + 804 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "This work was carried out with the support of Centre for Immersive Visual Technologies (CIVIT) research infrastructure, Tampere University, Finland. In addition, the authors wish to acknowledge CSC - IT Center for Science, Finland, for computational and storage resources.", + "bbox": [ + 507, + 810, + 924, + 876 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "REFERENCES", + "text_level": 1, + "bbox": [ + 233, + 64, + 322, + 76 + ], + "page_idx": 3 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[1] ISO/IEC 23090-5:2021. \"Information technology — coded representation of immersive media — part 5: visual volumetric video-based coding (V3C) and video-based point cloud compression (V-PCC),\" Jun. 2021.", + "[2] D. 
Graziosi, et al., \"An overview of ongoing point cloud compression standardization activities: video-based (V-PCC) and geometry-based (G-PCC),\" APSIPA Trans. Signal Information Process., vol. 9, Apr. 2020.", + "[3] J. M. Boyce, et al., \"MPEG immersive video coding standard,\" Proc. IEEE, vol. 109, no. 9, pp. 1521-1536, Sep. 2021.", + "[4] V. K. M. Vadakital, et al., “The MPEG immersive video standard—current status and future outlook,” IEEE MultiMedia, vol. 29, no. 3, pp. 101–111, Jul.–Sep. 2022.", + "[5] ISO/IEC JTC1/SC29/WG11, \"Common test conditions for V3C and V-PCC,\" Document N19518, Online, Jul. 2020.", + "[6] I. Reimat, et al., \"CWIPC-SXR: point cloud dynamic human dataset for social XR,\" in Proc. ACM Multimedia Sys. Conf., pp. 300-306, Istanbul, Turkey, Sep. 2021.", + "[7] R. Pagés, K. Amplanitis, J. Ondrej, E. Zerman, and A. Smolic, \"Volograms & V-SENSE volumetric video dataset,\" Mar. 2022.", + "[8] E. d'Eon, B. Harrison, T. Myers, and P. A. Chou, \"8i voxelized full bodies - a voxelized point cloud dataset,\" ISO/IEC JTC1/SC29 Joint WG11/WG1, Document WG11M40059/WG1M74006, Geneva, Switzerland, Jan. 2017." + ], + "bbox": [ + 73, + 80, + 485, + 344 + ], + "page_idx": 3 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[9] Y. Xu, Y. Lu, and Z. Wen, \"Owlii dynamic human mesh sequence dataset,\" ISO/IEC JTC1/SC29/WG11 Document M41658, Macau, China, Oct. 2017.", + "[10] M. Krivokuca, P. A. Chou, and P. Savill, \"8i voxelized surface light field (8iVSLF) dataset,\" ISO/IEC JTC1/SC29 WG11, Document M42914, Ljubljana, Slovenia, Jul. 2018.", + "[11] UVG-VPC Licence, [Online], Available: https://ultravideo.fi/UVG-VPC/ licence.pdf, Accessed: May. 26, 2023.", + "[12] Mantis Vision Website, [Online], Available: https://mantis-vision.com/, Accessed: Apr. 26, 2023.", + "[13] E. W. Weisstein, “Triangle point picking,” [Online], Available: https://mathworld.wolfram.com/TrianglePointPicking.html, Accessed: Apr. 26, 2023.", + "[14] Q.-Y. 
Zhou, J. Park, and V. Koltun, \"Open3D: a modern library for 3D data processing,\" arXiv:1801.09847, Jan. 2018.", + "[15] D. Tian, H. Ochimizu, C. Feng, R. Cohen, and A. Vetro, “Evaluation metrics for point cloud compression,” ISO/IEC JTC1/SC29/WG11, Document M39966, Geneva, Switzerland, Jan. 2017.", + "[16] D. Tian, H. Ochimizu, C. Feng, R. Cohen, and A. Vetro, “Updates and integration of evaluation metric software for PCC,” ISO/IEC JTC1/SC29/WG11, Document M40522, Hobart, Australia, Apr. 2017.", + "[17] M. Wien, J. Jung, and V. Baroncini, \"Formal visual evaluation and study of objective metrics for MPEG dynamic mesh coding,\" in Proc. Eur. Workshop Vis. Inf. Process., Lisbon, Portugal, Sep. 2022." + ], + "bbox": [ + 512, + 63, + 924, + 326 + ], + "page_idx": 3 + } +] \ No newline at end of file diff --git a/data/2025/2504_05xxx/2504.05888/a802a693-b787-42ea-9506-1d20b2ad6f02_model.json b/data/2025/2504_05xxx/2504.05888/a802a693-b787-42ea-9506-1d20b2ad6f02_model.json new file mode 100644 index 0000000000000000000000000000000000000000..053ade194e8f57c266a5764f48612757fb6403d3 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05888/a802a693-b787-42ea-9506-1d20b2ad6f02_model.json @@ -0,0 +1,879 @@ +[ + [ + { + "type": "title", + "bbox": [ + 0.149, + 0.033, + 0.85, + 0.099 + ], + "angle": 0, + "content": "UVG-VPC: Voxelized Point Cloud Dataset for Visual Volumetric Video-based Coding" + }, + { + "type": "text", + "bbox": [ + 0.224, + 0.114, + 0.775, + 0.138 + ], + "angle": 0, + "content": "Guillaume Gautier, Alexandre Mercat, Louis Fréneau, Mikko Pitkänen, and Jarno Vanne \nUltra Video Group, Tampere University, Tampere, Finland" + }, + { + "type": "text", + "bbox": [ + 0.223, + 0.139, + 0.776, + 0.151 + ], + "angle": 0, + "content": "{guillaume.gautier, alexandre.mercat, louis.freneau, mikko.pitkanen, jarno.vanne} @tuni.fi" + }, + { + "type": "text", + "bbox": [ + 0.07, + 0.168, + 0.486, + 0.378 + ], + "angle": 0, + "content": "Abstract- Point cloud compression has 
become a crucial factor in immersive visual media processing and streaming. This paper presents a new open dataset called UVG-VPC for the development, evaluation, and validation of MPEG Visual Volumetric Video-based Coding (V3C) technology. The dataset is distributed under its own non-commercial license. It consists of 12 point cloud test video sequences of diverse characteristics with respect to the motion, RGB texture, 3D geometry, and surface occlusion of the points. Each sequence is 10 seconds long and comprises 250 frames captured at 25 frames per second. The sequences are voxelized with a geometry precision of 9 to 12 bits, and the voxel color attributes are represented as 8-bit RGB values. The dataset also includes associated normals that make it more suitable for evaluating point cloud compression solutions. The main objective of releasing the UVG-VPC dataset is to foster the development of V3C technologies and thereby shape the future in this field." + }, + { + "type": "text", + "bbox": [ + 0.071, + 0.389, + 0.487, + 0.428 + ], + "angle": 0, + "content": "Keywords—Open dataset, point cloud, Visual Volumetric Video-based Coding (V3C), Video-based Point Cloud Compression (V-PCC), Extended Reality (XR)" + }, + { + "type": "title", + "bbox": [ + 0.205, + 0.437, + 0.342, + 0.449 + ], + "angle": 0, + "content": "I. INTRODUCTION" + }, + { + "type": "text", + "bbox": [ + 0.07, + 0.453, + 0.486, + 0.611 + ], + "angle": 0, + "content": "Recent advances in volumetric visual media technologies have opened a plethora of opportunities for Extended Reality (XR). The state-of-the-art volumetric sensing and capturing technologies allow for the creation of detailed and immersive digital representations of the real world in three-dimensional (3D) space. In general, these representations can be represented as polygon meshes or point clouds that provide a realistic and detailed view of scenes from any viewpoint. 
Moreover, the natural and realistic viewing experience in XR is enhanced by 6 degrees of freedom (6DoF), which enables viewers to move around in the scene with both translational and rotational freedom and thereby expand the viewing space." + }, + { + "type": "text", + "bbox": [ + 0.07, + 0.617, + 0.486, + 0.786 + ], + "angle": 0, + "content": "Economic storage and transmission of volumetric visual data require efficient compression technologies. To that end, the Motion Picture Experts Group (MPEG) has released the Visual Volumetric Video-based Coding (V3C) standards ISO/IEC 23090-5 [1] to compress dynamic volumetric scenes for XR applications, including gaming, sports broadcasting, and motion pictures. V3C can be used to compress various types of volumetric content, such as point clouds, immersive video with depth, and mesh representations of visual volumetric frames. For the time being, V3C includes two standards: Video-based Point Cloud Compression (V-PCC) [2], and MPEG immersive video (MIV) [3], [4], of which this paper focuses on the V-PCC standard." + }, + { + "type": "text", + "bbox": [ + 0.07, + 0.792, + 0.486, + 0.873 + ], + "angle": 0, + "content": "Rate-distortion (RD) performance of video codes is typically evaluated with objective and subjective metrics, which involves a trade-off between coding efficiency and loss of information. Conducting these quality assessments comprehensively calls for representative datasets that cover a broad range of content (e.g., motion, texture, or occlusion)." + }, + { + "type": "text", + "bbox": [ + 0.508, + 0.168, + 0.925, + 0.221 + ], + "angle": 0, + "content": "Given that V-PCC is commonly used for telecommunication and XR applications [4], the test set should be composed of voxelized point cloud full-body human subjects, as shown by the Common Test Conditions (CTC) for V-PCC [5]." 
+ }, + { + "type": "text", + "bbox": [ + 0.508, + 0.227, + 0.927, + 0.319 + ], + "angle": 0, + "content": "Table I lists the existing open point cloud datasets of full human body. The first two of them are not voxelized [6], [7], whereas the remaining three have limitations in geometry precision and size [8]–[10]. Hence, none of them is optimal for the development and evaluation of V-PCC tools, so there is an urgent need for high-quality datasets that contain real-world scenes with multifaceted content and motion." + }, + { + "type": "text", + "bbox": [ + 0.508, + 0.325, + 0.927, + 0.404 + ], + "angle": 0, + "content": "This paper presents a new open dataset called UVG-VPC that is made up of 12 voxelized point cloud test sequences. Each sequence is \\(10\\mathrm{s}\\) in length, comprises 250 frames captured at a frame rate of 25 frame per second (fps), and has RGB attribute precision of 8 bits and a geometry precision of 9, 10, 11, and 12 bits. The dataset is available online at" + }, + { + "type": "text", + "bbox": [ + 0.608, + 0.407, + 0.827, + 0.421 + ], + "angle": 0, + "content": "https://ultravideo.fi/UVG-VPC/" + }, + { + "type": "text", + "bbox": [ + 0.508, + 0.427, + 0.926, + 0.532 + ], + "angle": 0, + "content": "It is released under its own non-commercial license [11]. Additionally, the associated normals are provided for all sequences. To the best of our knowledge, the proposed dataset is the first and only one that has been entirely designed for the development, evaluation, and validation of V-PCC coding technologies. The UVG-VPC dataset seeks to serve as a valuable resource for researchers and practitioners in the field of volumetric data compression and beyond." + }, + { + "type": "text", + "bbox": [ + 0.508, + 0.538, + 0.926, + 0.617 + ], + "angle": 0, + "content": "The remainder of the paper is outlined as follows. Section II describes the volumetric capture studio setup used to obtain the needed data. 
Section III details the proposed workflow for voxelized point cloud generation. Section IV introduces our UVG-VPC dataset and discusses its characteristics. Finally, Section V concludes the paper." + }, + { + "type": "title", + "bbox": [ + 0.584, + 0.626, + 0.836, + 0.639 + ], + "angle": 0, + "content": "II. VOLUMETRIC CAPTURE STUDIO" + }, + { + "type": "text", + "bbox": [ + 0.508, + 0.643, + 0.927, + 0.723 + ], + "angle": 0, + "content": "The proposed dataset was captured with the volumetric capture studio developed by Mantis Vision [12]. Fig. 1 illustrates the studio setup that is composed of 32 (19 long and 13 short) camera units with different stereo distances. Each camera unit is composed of two RGB cameras, an IR projector, an IR (infrared) camera, and an Intel Next Unit of" + }, + { + "type": "table_caption", + "bbox": [ + 0.517, + 0.758, + 0.927, + 0.769 + ], + "angle": 0, + "content": "TABLE I. EXISTING OPEN POINT CLOUD DATASETS OF FULL HUMAN BODY" + }, + { + "type": "table", + "bbox": [ + 0.517, + 0.77, + 0.929, + 0.888 + ], + "angle": 0, + "content": "
RefDataset#seq.Fps#frames#camsVoxelized/ geometry precision
[6]CWIPC-SXR2130596–27687No
[7]Volograms & V-SENSE330149–183012/60No
[8]8iVFBv2*43030042Yes/10bits
[9]Owlii*430600-Yes/11bits
[10]8iVSLF7301, 30039Yes/12bits
OurUVG-VPC122525096Yes/9–12bits
" + }, + { + "type": "table_footnote", + "bbox": [ + 0.517, + 0.889, + 0.769, + 0.9 + ], + "angle": 0, + "content": "*Partially included in the CTC for V-PCC [5]." + }, + { + "type": "page_footnote", + "bbox": [ + 0.07, + 0.884, + 0.483, + 0.914 + ], + "angle": 0, + "content": "This work was supported in part by the XR Simulation and Presence at the Cloud Edge (XR-SPACE) project led by Nokia and funded by Business Finland, and the Academy of Finland (decision no. 349216)." + } + ], + [ + { + "type": "image", + "bbox": [ + 0.154, + 0.062, + 0.425, + 0.211 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.072, + 0.213, + 0.255, + 0.227 + ], + "angle": 0, + "content": "Fig. 1. Volumetric capture setup." + }, + { + "type": "table_caption", + "bbox": [ + 0.101, + 0.239, + 0.455, + 0.25 + ], + "angle": 0, + "content": "TABLE II. SPECIFICATION OF SHORT AND LONG CAMERA UNITS" + }, + { + "type": "table", + "bbox": [ + 0.074, + 0.252, + 0.484, + 0.372 + ], + "angle": 0, + "content": "
Camera unit typeLong (×19)Short (×13)
RGB camera ×2SpecificationUI-328xCP-CUI-308xCP-C
Resolution2456×20542456×2054
Stereo distance~30cm~10cm
IR cameraSpecificationUI-314xCP-M
Resolution640×512
Intel NUCProcessorIntel(R) Core(TM) i7-8665U CPU @ 1.90GHz
Memory32 GB
Hard driveSamsung 970 EVO Plus SSD 1Tb
" + }, + { + "type": "text", + "bbox": [ + 0.071, + 0.379, + 0.486, + 0.446 + ], + "angle": 0, + "content": "Computing (NUC); their specifications are given in Table II. The camera units are connected through a tree topology using 10Gbps switches, with four camera units connected to each switch, and two switches connected to the render computer. The studio is able to capture volumetric video at up to 25 fps." + }, + { + "type": "text", + "bbox": [ + 0.071, + 0.451, + 0.487, + 0.557 + ], + "angle": 0, + "content": "The studio features 40 LED tubes of \\(50\\mathrm{W}\\) and a Sync LED that flashes and triggers at the recording frame rate. Cameras capture images at slightly different times to avoid IR interference, with opposite cameras exposing at the same time. The studio is set up to a height of \\(2.5\\mathrm{m}\\), with a diameter of \\(3\\mathrm{m}\\), and it allows scanning of a scene with a height of \\(2.2\\mathrm{m}\\) and a diameter of \\(1.6\\mathrm{m}\\). To enhance the capture quality of faces, most cameras are located on the upper part of the body." + }, + { + "type": "title", + "bbox": [ + 0.112, + 0.565, + 0.424, + 0.578 + ], + "angle": 0, + "content": "III. VOXELIZED POINT CLOUD GENERATION" + }, + { + "type": "text", + "bbox": [ + 0.071, + 0.583, + 0.487, + 0.662 + ], + "angle": 0, + "content": "Fig. 2 depicts the five steps needed to generate the proposed voxelized point cloud dataset. The first two steps are processed by the off-the-shelf equipment of the volumetric capture studio. The remaining three steps are designed in this work to make our sequences matching the format of the sequences from CTC for V-PCC [5]." + }, + { + "type": "text", + "bbox": [ + 0.073, + 0.668, + 0.487, + 0.759 + ], + "angle": 0, + "content": "1) Point cloud acquisition is executed by the camera units and the render computer. The camera units capture both RGB and IR data, which are fused by the NUCs into a point cloud structure. 
Both camera data and generated point clouds are sent over the network to the render computer that merges them into a single Raw Merged Point Cloud." + }, + { + "type": "text", + "bbox": [ + 0.071, + 0.762, + 0.486, + 0.815 + ], + "angle": 0, + "content": "2) Mesh generation is used to create a Mesh from the Raw Merged Point Cloud with off-the-shelf Poisson surface reconstruction algorithm provided with the volumetric capture studio." + }, + { + "type": "text", + "bbox": [ + 0.071, + 0.818, + 0.486, + 0.897 + ], + "angle": 0, + "content": "3) Mesh sampling deploys triangle point picking from the trimesh Python library [13] to generate a Sampled Mesh Point Cloud. After studying the trade-off between having a sufficient number of points for the following voxelization process and a reasonable memory footprint, the number of sampled points was fixed to 10 million." + }, + { + "type": "list", + "bbox": [ + 0.071, + 0.668, + 0.487, + 0.897 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.514, + 0.062, + 0.916, + 0.201 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.511, + 0.201, + 0.853, + 0.214 + ], + "angle": 0, + "content": "Fig. 2. Overview of the UVG-VPC dataset generation process." + }, + { + "type": "text", + "bbox": [ + 0.51, + 0.223, + 0.926, + 0.354 + ], + "angle": 0, + "content": "4) Voxelization is the process of applying point cloud data on a regular 3D grid structure, where each cell or voxel represents the presence or absence of points within its boundaries. When multiple points are involved, color attributes are averaged. For this step, the voxel size is computed as the maximum dimension of capture system bounding box across all dimensions divided by \\(2^{N}\\), where \\(N\\) is the geometry precision. The UVG-VPC dataset includes Voxelized Point Clouds with a geometry precision of 9, 10, 11, and 12 bits." 
+ }, + { + "type": "text", + "bbox": [ + 0.51, + 0.357, + 0.926, + 0.447 + ], + "angle": 0, + "content": "5) Normal generation computes the Normals for each Voxelized Point Cloud in the UVG-VPC dataset using open3D Python library [14] and a Knn normal estimation with 12 neighbours [15], [16]. These Normals are used in the CTC for V-PCC to calculate the quality metric known as D2 [5]. Providing the Normals enables fair comparisons between solutions." + }, + { + "type": "list", + "bbox": [ + 0.51, + 0.223, + 0.926, + 0.447 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.509, + 0.451, + 0.925, + 0.53 + ], + "angle": 0, + "content": "In addition to the UVG-VPC dataset, we provide open access to the intermediate data used to create the sequences. Scientific community is free to use it in voxelized point cloud generation with varying geometry precision, as well as in other areas of interest such as mesh generation or dynamic mesh compression [17]." + }, + { + "type": "title", + "bbox": [ + 0.613, + 0.539, + 0.8, + 0.552 + ], + "angle": 0, + "content": "IV. UVG-VPC DATASET" + }, + { + "type": "text", + "bbox": [ + 0.509, + 0.557, + 0.925, + 0.61 + ], + "angle": 0, + "content": "The proposed UVG-VPC dataset consists of 12 sequences, each \\(10\\mathrm{~s}\\) long and composed of 250 frames captured at 25 fps. For each sequence, point cloud voxelized at 9, 10, 11, and 12 bits are provided with their associated normals." + }, + { + "type": "text", + "bbox": [ + 0.509, + 0.615, + 0.926, + 0.695 + ], + "angle": 0, + "content": "Table III lists the UVG-VPC sequences alphabetically and characterizes them with snapshots, names, content descriptions, and specific features. There is also a graph for each sequence that shows the distribution of points per frame for a geometry precision of 10 bits, as well as the corresponding average, minimum, and maximum values." 
+ }, + { + "type": "text", + "bbox": [ + 0.509, + 0.701, + 0.926, + 0.767 + ], + "angle": 0, + "content": "The features of the sequences were carefully selected to make them challenging for various compression algorithms and ensure that the dataset is representative of real-world scenarios. In particular, the characterisation was done with respect to the following features:" + }, + { + "type": "text", + "bbox": [ + 0.511, + 0.771, + 0.737, + 0.784 + ], + "angle": 0, + "content": "- speed: speed of moving points;" + }, + { + "type": "text", + "bbox": [ + 0.511, + 0.788, + 0.797, + 0.802 + ], + "angle": 0, + "content": "- motion field: quantity of moving points;" + }, + { + "type": "text", + "bbox": [ + 0.511, + 0.805, + 0.9, + 0.819 + ], + "angle": 0, + "content": "- RGB texture: texture complexity of the RGB attributes;" + }, + { + "type": "text", + "bbox": [ + 0.511, + 0.823, + 0.9, + 0.836 + ], + "angle": 0, + "content": "- 3D geometry: complexity of the volumetric shapes; and" + }, + { + "type": "text", + "bbox": [ + 0.511, + 0.84, + 0.871, + 0.854 + ], + "angle": 0, + "content": "- surface occlusion: number of (dis)appearing points." + }, + { + "type": "list", + "bbox": [ + 0.511, + 0.771, + 0.9, + 0.854 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.509, + 0.856, + 0.926, + 0.897 + ], + "angle": 0, + "content": "Table IV summarizes the characteristics of the UVG-VPC sequences. They all have unique characteristics, i.e., no two sequences sharing the same set of features. Furthermore, some" + } + ], + [ + { + "type": "table_caption", + "bbox": [ + 0.178, + 0.062, + 0.817, + 0.073 + ], + "angle": 0, + "content": "TABLE III. CHARACTERISTICS OF THE VOXELIZED POINT CLOUD SEQUENCES IN THE PROPOSED UVG-VPC DATASET" + }, + { + "type": "table", + "bbox": [ + 0.078, + 0.076, + 0.92, + 0.654 + ], + "angle": 0, + "content": "
SnapshotName and description#pts/frame (10-bits)SnapshotName and description#pts/frame (10-bits)
Name: Blue Backpack\nDescription: a person takes a jacket out of a backpack and puts it on.\nSpecific features: unicolor clothes and accessories; the interaction with the accessories adds 3D geometry complexity and surface occlusions.#PTS\n1.4 #10^5\n1.2\n0.8\n0.5\nAverage: 1 051 399\nMin: 799 322\nMax: 1 302 904Name: BlueSpin\nDescription: a person dressed in blue is steadily spinning around.\nSpecific features: unicolor clothes; steady rotation about a fixed axis.7.5 #10^5\n7.7\n6.5\n0\nFrame ID\nAverage: 685 044\nMin: 679 347\nMax: 693 684
Name: BlueSquat\nDescription: a person dressed in blue is performing squats.\nSpecific features: unicolor clothes; intermittent surface occlusions due to body movements.#PTS\n7.5 #10^5\n7.7\n6.5\n0\nAverage: 718 167\nMin: 667 024\nMax: 741 177Name: CasualSpin\nDescription: a person wearing a striped shirt and jeans is steadily spinning around.\nSpecific features: textured top, unicolor bottom; steady rotation about a fixed axis.#PTS\n6.2\n5.8\n0\nFrame ID\nAverage: 599 669\nMin: 588 394\nMax: 619 662
Name: CasualSquat\nDescription: a person wearing a striped shirt and jeans is performing squats.\nSpecific features: textured top; unicolor bottom; intermittent surface occlusions due to body movements.#PTS\n6.2\n5.8\n0\nAverage: 614 419\nMin: 602 150\nMax: 629 416Name: ElegantDance\nDescription: a person wearing a long black dress is dancing and twirling around.\nSpecific features: unicolor clothes; as the person moves and twirls, the dress is folding and shifting, creating a complex 3D geometry.#PTS\n10 #10^5\n10\n0\n0\n0\nAverage: 771 714\nMin: 579 917\nMax: 1 098 016
Name: ElegantWave\nDescription: a person wearing a long black dress greets by waving hand.\nSpecific features: unicolor clothes; only upper-body movement; no dress movement; simple 3D geometry.#PTS\n10 #10^5\n5\n0\nAverage: 661 405\nMin: 650 731\nMax: 674 383Name: FlowerDance\nDescription: a person wearing a long flower dress is dancing and twirling around.\nSpecific features: textured clothes; as the person moves and twirls, the dress is folding and shifting, creating a complex 3D geometry.#PTS\n10 #10^5\n10\n0\n0\n0\nAverage: 755 299\nMin: 650 961\nMax: 913 621
Name: FlowerWave\nDescription: a person wearing a long flower dress greets by waving hand.\nSpecific features: textured clothes; only upper-body movement; no dress movement; simple 3D geometry.#PTS\n10 #10^5\n6\n0\nAverage: 691 334\nMin: 659 972\nMax: 708 898Name: Gymnast\nDescription: a person stands on one leg and does a leg hold.\nSpecific features: unicolor clothes; leg movement creates surface occlusion.#PTS\n5.5 #10^5\n5\n0\n0\n0\nAverage: 523 078\nMin: 509 576\nMax: 551 604
Name: HelloGoodbye\nDescription: a person wearing a long black dress enters the scene, greets by waving hand, and leaves the scene.\nSpecific features: unicolor clothes; empty capture space at sequence start and end.#PTS\n10 #10^5\n5\n0\nAverage: 639 807\nMin: 133 276\nMax: 929 588Name: ReadyForWinter\nDescription: a person puts on a beanie and a scarf.\nSpecific features: unicolor clothes; textured scarf; interaction with accessories creates complex surface structures and surface occlusions.#PTS\n7.0 #10^5\n7.0\n7.0\n0\nFrame ID\nAverage: 794 317\nMin: 730 913\nMax: 883 642
" + }, + { + "type": "table_caption", + "bbox": [ + 0.131, + 0.663, + 0.43, + 0.673 + ], + "angle": 0, + "content": "TABLE IV. UVG-VPC DATASET CHARACTERIZATION" + }, + { + "type": "table", + "bbox": [ + 0.075, + 0.676, + 0.489, + 0.842 + ], + "angle": 0, + "content": "
SequenceSpeedMotion fieldRGB texture3D geometrySurface occlusion
BlueBackpackFastMediumSimpleComplexPlenty of
BlueSpinMediumDenseSimpleSimpleLittle
BlueSquatFastDenseSimpleMediumMedium
CasualSpinMediumDenseMediumSimpleLittle
CasualSquatFastDenseMediumMediumMedium
ElegantDanceFastDenseSimpleComplexPlenty of
ElegantWaveSlowSparseSimpleSimpleLittle
FlowerDanceFastDenseComplexComplexPlenty of
FlowerWaveSlowSparseComplexSimpleLittle
GymnastMediumMediumSimpleSimpleMedium
HelloGoodbyeMediumMediumSimpleMediumPlenty of
ReadyForWinterMediumMediumMediumComplexPlenty of
" + }, + { + "type": "text", + "bbox": [ + 0.071, + 0.848, + 0.486, + 0.873 + ], + "angle": 0, + "content": "sequences were specifically designed to contrast with each other in terms of one or more of these criteria." + }, + { + "type": "title", + "bbox": [ + 0.647, + 0.662, + 0.774, + 0.673 + ], + "angle": 0, + "content": "V. CONCLUSION" + }, + { + "type": "text", + "bbox": [ + 0.509, + 0.679, + 0.925, + 0.785 + ], + "angle": 0, + "content": "This paper presented the UVG-VPC open dataset, which has been carefully designed to facilitate the development, evaluation, and validation of V-PCC coding technology. The dataset consists of 12 voxelized point cloud sequences and associated normals. We believe that the availability of the UVG-VPC dataset will enable researchers and practitioners to advance the state-of-the-art in point cloud compression and foster its deployment in immersive visual media applications." + }, + { + "type": "title", + "bbox": [ + 0.648, + 0.795, + 0.789, + 0.806 + ], + "angle": 0, + "content": "ACKNOWLEDGMENT" + }, + { + "type": "text", + "bbox": [ + 0.509, + 0.811, + 0.925, + 0.877 + ], + "angle": 0, + "content": "This work was carried out with the support of Centre for Immersive Visual Technologies (CIVIT) research infrastructure, Tampere University, Finland. In addition, the authors wish to acknowledge CSC - IT Center for Science, Finland, for computational and storage resources." + } + ], + [ + { + "type": "title", + "bbox": [ + 0.234, + 0.065, + 0.324, + 0.077 + ], + "angle": 0, + "content": "REFERENCES" + }, + { + "type": "ref_text", + "bbox": [ + 0.074, + 0.081, + 0.486, + 0.126 + ], + "angle": 0, + "content": "[1] ISO/IEC 23090-5:2021. \"Information technology — coded representation of immersive media — part 5: visual volumetric video-based coding (V3C) and video-based point cloud compression (V-PCC),\" Jun. 2021." + }, + { + "type": "ref_text", + "bbox": [ + 0.074, + 0.127, + 0.486, + 0.169 + ], + "angle": 0, + "content": "[2] D. 
Graziosi, et al., \"An overview of ongoing point cloud compression standardization activities: video-based (V-PCC) and geometry-based (G-PCC),\" APSIPA Trans. Signal Information Process., vol. 9, Apr. 2020." + }, + { + "type": "ref_text", + "bbox": [ + 0.074, + 0.17, + 0.485, + 0.192 + ], + "angle": 0, + "content": "[3] J. M. Boyce, et al., \"MPEG immersive video coding standard,\" Proc. IEEE, vol. 109, no. 9, pp. 1521-1536, Sep. 2021." + }, + { + "type": "ref_text", + "bbox": [ + 0.074, + 0.192, + 0.486, + 0.224 + ], + "angle": 0, + "content": "[4] V. K. M. Vadakital, et al., “The MPEG immersive video standard—current status and future outlook,” IEEE MultiMedia, vol. 29, no. 3, pp. 101–111, Jul.–Sep. 2022." + }, + { + "type": "ref_text", + "bbox": [ + 0.074, + 0.225, + 0.486, + 0.246 + ], + "angle": 0, + "content": "[5] ISO/IEC JTC1/SC29/WG11, \"Common test conditions for V3C and V-PCC,\" Document N19518, Online, Jul. 2020." + }, + { + "type": "ref_text", + "bbox": [ + 0.074, + 0.247, + 0.486, + 0.279 + ], + "angle": 0, + "content": "[6] I. Reimat, et al., \"CWIPC-SXR: point cloud dynamic human dataset for social XR,\" in Proc. ACM Multimedia Sys. Conf., pp. 300-306, Istanbul, Turkey, Sep. 2021." + }, + { + "type": "ref_text", + "bbox": [ + 0.074, + 0.28, + 0.486, + 0.3 + ], + "angle": 0, + "content": "[7] R. Pagés, K. Amplanitis, J. Ondrej, E. Zerman, and A. Smolic, \"Volograms & V-SENSE volumetric video dataset,\" Mar. 2022." + }, + { + "type": "ref_text", + "bbox": [ + 0.074, + 0.301, + 0.486, + 0.345 + ], + "angle": 0, + "content": "[8] E. d'Eon, B. Harrison, T. Myers, and P. A. Chou, \"8i voxelized full bodies - a voxelized point cloud dataset,\" ISO/IEC JTC1/SC29 Joint WG11/WG1, Document WG11M40059/WG1M74006, Geneva, Switzerland, Jan. 2017." 
+ }, + { + "type": "list", + "bbox": [ + 0.074, + 0.081, + 0.486, + 0.345 + ], + "angle": 0, + "content": null + }, + { + "type": "ref_text", + "bbox": [ + 0.513, + 0.064, + 0.925, + 0.096 + ], + "angle": 0, + "content": "[9] Y. Xu, Y. Lu, and Z. Wen, \"Owlii dynamic human mesh sequence dataset,\" ISO/IEC JTC1/SC29/WG11 Document M41658, Macau, China, Oct. 2017." + }, + { + "type": "ref_text", + "bbox": [ + 0.513, + 0.098, + 0.926, + 0.129 + ], + "angle": 0, + "content": "[10] M. Krivokuca, P. A. Chou, and P. Savill, \"8i voxelized surface light field (8iVSLF) dataset,\" ISO/IEC JTC1/SC29 WG11, Document M42914, Ljubljana, Slovenia, Jul. 2018." + }, + { + "type": "ref_text", + "bbox": [ + 0.514, + 0.13, + 0.926, + 0.151 + ], + "angle": 0, + "content": "[11] UVG-VPC Licence, [Online], Available: https://ultravideo.fi/UVG-VPC/ licence.pdf, Accessed: May. 26, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.514, + 0.152, + 0.926, + 0.173 + ], + "angle": 0, + "content": "[12] Mantis Vision Website, [Online], Available: https://mantis-vision.com/, Accessed: Apr. 26, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.514, + 0.174, + 0.926, + 0.206 + ], + "angle": 0, + "content": "[13] E. W. Weisstein, “Triangle point picking,” [Online], Available: https://mathworld.wolfram.com/TrianglePointPicking.html, Accessed: Apr. 26, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.514, + 0.207, + 0.926, + 0.228 + ], + "angle": 0, + "content": "[14] Q.-Y. Zhou, J. Park, and V. Koltun, \"Open3D: a modern library for 3D data processing,\" arXiv:1801.09847, Jan. 2018." + }, + { + "type": "ref_text", + "bbox": [ + 0.514, + 0.229, + 0.926, + 0.26 + ], + "angle": 0, + "content": "[15] D. Tian, H. Ochimizu, C. Feng, R. Cohen, and A. Vetro, “Evaluation metrics for point cloud compression,” ISO/IEC JTC1/SC29/WG11, Document M39966, Geneva, Switzerland, Jan. 2017." + }, + { + "type": "ref_text", + "bbox": [ + 0.514, + 0.261, + 0.926, + 0.293 + ], + "angle": 0, + "content": "[16] D. 
Tian, H. Ochimizu, C. Feng, R. Cohen, and A. Vetro, “Updates and integration of evaluation metric software for PCC,” ISO/IEC JTC1/SC29/WG11, Document M40522, Hobart, Australia, Apr. 2017." + }, + { + "type": "ref_text", + "bbox": [ + 0.514, + 0.294, + 0.926, + 0.327 + ], + "angle": 0, + "content": "[17] M. Wien, J. Jung, and V. Baroncini, \"Formal visual evaluation and study of objective metrics for MPEG dynamic mesh coding,\" in Proc. Eur. Workshop Vis. Inf. Process., Lisbon, Portugal, Sep. 2022." + }, + { + "type": "list", + "bbox": [ + 0.513, + 0.064, + 0.926, + 0.327 + ], + "angle": 0, + "content": null + } + ] +] \ No newline at end of file diff --git a/data/2025/2504_05xxx/2504.05888/a802a693-b787-42ea-9506-1d20b2ad6f02_origin.pdf b/data/2025/2504_05xxx/2504.05888/a802a693-b787-42ea-9506-1d20b2ad6f02_origin.pdf new file mode 100644 index 0000000000000000000000000000000000000000..d10403c822f3a263edd91a1472fb716aa25ba11a --- /dev/null +++ b/data/2025/2504_05xxx/2504.05888/a802a693-b787-42ea-9506-1d20b2ad6f02_origin.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:28320c197ff32e940b983e9a46e597ac4124a41f36cbb395d240cae7bf44fbd2 +size 355829 diff --git a/data/2025/2504_05xxx/2504.05888/full.md b/data/2025/2504_05xxx/2504.05888/full.md new file mode 100644 index 0000000000000000000000000000000000000000..553499a1f1ec6a87b9df34aae4de3dd92279995e --- /dev/null +++ b/data/2025/2504_05xxx/2504.05888/full.md @@ -0,0 +1,230 @@ +# UVG-VPC: Voxelized Point Cloud Dataset for Visual Volumetric Video-based Coding + +Guillaume Gautier, Alexandre Mercat, Louis Fréneau, Mikko Pitkänen, and Jarno Vanne +Ultra Video Group, Tampere University, Tampere, Finland + +{guillaume.gautier, alexandre.mercat, louis.freneau, mikko.pitkanen, jarno.vanne} @tuni.fi + +Abstract- Point cloud compression has become a crucial factor in immersive visual media processing and streaming. 
This paper presents a new open dataset called UVG-VPC for the development, evaluation, and validation of MPEG Visual Volumetric Video-based Coding (V3C) technology. The dataset is distributed under its own non-commercial license. It consists of 12 point cloud test video sequences of diverse characteristics with respect to the motion, RGB texture, 3D geometry, and surface occlusion of the points. Each sequence is 10 seconds long and comprises 250 frames captured at 25 frames per second. The sequences are voxelized with a geometry precision of 9 to 12 bits, and the voxel color attributes are represented as 8-bit RGB values. The dataset also includes associated normals that make it more suitable for evaluating point cloud compression solutions. The main objective of releasing the UVG-VPC dataset is to foster the development of V3C technologies and thereby shape the future in this field. + +Keywords—Open dataset, point cloud, Visual Volumetric Video-based Coding (V3C), Video-based Point Cloud Compression (V-PCC), Extended Reality (XR) + +# I. INTRODUCTION + +Recent advances in volumetric visual media technologies have opened a plethora of opportunities for Extended Reality (XR). The state-of-the-art volumetric sensing and capturing technologies allow for the creation of detailed and immersive digital representations of the real world in three-dimensional (3D) space. In general, these representations can be represented as polygon meshes or point clouds that provide a realistic and detailed view of scenes from any viewpoint. Moreover, the natural and realistic viewing experience in XR is enhanced by 6 degrees of freedom (6DoF), which enables viewers to move around in the scene with both translational and rotational freedom and thereby expand the viewing space. + +Economic storage and transmission of volumetric visual data require efficient compression technologies. 
To that end, the Motion Picture Experts Group (MPEG) has released the Visual Volumetric Video-based Coding (V3C) standards ISO/IEC 23090-5 [1] to compress dynamic volumetric scenes for XR applications, including gaming, sports broadcasting, and motion pictures. V3C can be used to compress various types of volumetric content, such as point clouds, immersive video with depth, and mesh representations of visual volumetric frames. For the time being, V3C includes two standards: Video-based Point Cloud Compression (V-PCC) [2], and MPEG immersive video (MIV) [3], [4], of which this paper focuses on the V-PCC standard. + +Rate-distortion (RD) performance of video codes is typically evaluated with objective and subjective metrics, which involves a trade-off between coding efficiency and loss of information. Conducting these quality assessments comprehensively calls for representative datasets that cover a broad range of content (e.g., motion, texture, or occlusion). + +Given that V-PCC is commonly used for telecommunication and XR applications [4], the test set should be composed of voxelized point cloud full-body human subjects, as shown by the Common Test Conditions (CTC) for V-PCC [5]. + +Table I lists the existing open point cloud datasets of full human body. The first two of them are not voxelized [6], [7], whereas the remaining three have limitations in geometry precision and size [8]–[10]. Hence, none of them is optimal for the development and evaluation of V-PCC tools, so there is an urgent need for high-quality datasets that contain real-world scenes with multifaceted content and motion. + +This paper presents a new open dataset called UVG-VPC that is made up of 12 voxelized point cloud test sequences. Each sequence is $10\mathrm{s}$ in length, comprises 250 frames captured at a frame rate of 25 frame per second (fps), and has RGB attribute precision of 8 bits and a geometry precision of 9, 10, 11, and 12 bits. 
The dataset is available online at + +https://ultravideo.fi/UVG-VPC/ + +It is released under its own non-commercial license [11]. Additionally, the associated normals are provided for all sequences. To the best of our knowledge, the proposed dataset is the first and only one that has been entirely designed for the development, evaluation, and validation of V-PCC coding technologies. The UVG-VPC dataset seeks to serve as a valuable resource for researchers and practitioners in the field of volumetric data compression and beyond. + +The remainder of the paper is outlined as follows. Section II describes the volumetric capture studio setup used to obtain the needed data. Section III details the proposed workflow for voxelized point cloud generation. Section IV introduces our UVG-VPC dataset and discusses its characteristics. Finally, Section V concludes the paper. + +# II. VOLUMETRIC CAPTURE STUDIO + +The proposed dataset was captured with the volumetric capture studio developed by Mantis Vision [12]. Fig. 1 illustrates the studio setup that is composed of 32 (19 long and 13 short) camera units with different stereo distances. Each camera unit is composed of two RGB cameras, an IR projector, an IR (infrared) camera, and an Intel Next Unit of + +TABLE I. EXISTING OPEN POINT CLOUD DATASETS OF FULL HUMAN BODY + +
RefDataset#seq.Fps#frames#camsVoxelized/ geometry precision
[6]CWIPC-SXR2130596–27687No
[7]Volograms & V-SENSE330149–183012/60No
[8]8iVFBv2*43030042Yes/10bits
[9]Owlii*430600-Yes/11bits
[10]8iVSLF7301, 30039Yes/12bits
OurUVG-VPC122525096Yes/9–12bits
+ +*Partially included in the CTC for V-PCC [5]. + +![](images/f242914588f033803c2e5584c7b701b20ec0e783d3b3edd51333f51b6f080b0a.jpg) +Fig. 1. Volumetric capture setup. + +TABLE II. SPECIFICATION OF SHORT AND LONG CAMERA UNITS + +
Camera unit typeLong (×19)Short (×13)
RGB camera ×2SpecificationUI-328xCP-CUI-308xCP-C
Resolution2456×20542456×2054
Stereo distance~30cm~10cm
IR cameraSpecificationUI-314xCP-M
Resolution640×512
Intel NUCProcessorIntel(R) Core(TM) i7-8665U CPU @ 1.90GHz
Memory32 GB
Hard driveSamsung 970 EVO Plus SSD 1Tb
+ +Computing (NUC); their specifications are given in Table II. The camera units are connected through a tree topology using 10Gbps switches, with four camera units connected to each switch, and two switches connected to the render computer. The studio is able to capture volumetric video at up to 25 fps. + +The studio features 40 LED tubes of $50\mathrm{W}$ and a Sync LED that flashes and triggers at the recording frame rate. Cameras capture images at slightly different times to avoid IR interference, with opposite cameras exposing at the same time. The studio is set up to a height of $2.5\mathrm{m}$ , with a diameter of $3\mathrm{m}$ , and it allows scanning of a scene with a height of $2.2\mathrm{m}$ and a diameter of $1.6\mathrm{m}$ . To enhance the capture quality of faces, most cameras are located on the upper part of the body. + +# III. VOXELIZED POINT CLOUD GENERATION + +Fig. 2 depicts the five steps needed to generate the proposed voxelized point cloud dataset. The first two steps are processed by the off-the-shelf equipment of the volumetric capture studio. The remaining three steps are designed in this work to make our sequences matching the format of the sequences from CTC for V-PCC [5]. + +1) Point cloud acquisition is executed by the camera units and the render computer. The camera units capture both RGB and IR data, which are fused by the NUCs into a point cloud structure. Both camera data and generated point clouds are sent over the network to the render computer that merges them into a single Raw Merged Point Cloud. +2) Mesh generation is used to create a Mesh from the Raw Merged Point Cloud with off-the-shelf Poisson surface reconstruction algorithm provided with the volumetric capture studio. +3) Mesh sampling deploys triangle point picking from the trimesh Python library [13] to generate a Sampled Mesh Point Cloud. 
After studying the trade-off between having a sufficient number of points for the following voxelization process and a reasonable memory footprint, the number of sampled points was fixed to 10 million. + +![](images/5024f55b9e1fd437fb2821dc70826e646043da398eed2e51b77c0ff590cd39a5.jpg) +Fig. 2. Overview of the UVG-VPC dataset generation process. + +4) Voxelization is the process of applying point cloud data on a regular 3D grid structure, where each cell or voxel represents the presence or absence of points within its boundaries. When multiple points are involved, color attributes are averaged. For this step, the voxel size is computed as the maximum dimension of capture system bounding box across all dimensions divided by $2^{N}$ , where $N$ is the geometry precision. The UVG-VPC dataset includes Voxelized Point Clouds with a geometry precision of 9, 10, 11, and 12 bits. +5) Normal generation computes the Normals for each Voxelized Point Cloud in the UVG-VPC dataset using open3D Python library [14] and a Knn normal estimation with 12 neighbours [15], [16]. These Normals are used in the CTC for V-PCC to calculate the quality metric known as D2 [5]. Providing the Normals enables fair comparisons between solutions. + +In addition to the UVG-VPC dataset, we provide open access to the intermediate data used to create the sequences. Scientific community is free to use it in voxelized point cloud generation with varying geometry precision, as well as in other areas of interest such as mesh generation or dynamic mesh compression [17]. + +# IV. UVG-VPC DATASET + +The proposed UVG-VPC dataset consists of 12 sequences, each $10\mathrm{~s}$ long and composed of 250 frames captured at 25 fps. For each sequence, point cloud voxelized at 9, 10, 11, and 12 bits are provided with their associated normals. + +Table III lists the UVG-VPC sequences alphabetically and characterizes them with snapshots, names, content descriptions, and specific features. 
There is also a graph for each sequence that shows the distribution of points per frame for a geometry precision of 10 bits, as well as the corresponding average, minimum, and maximum values. + +The features of the sequences were carefully selected to make them challenging for various compression algorithms and ensure that the dataset is representative of real-world scenarios. In particular, the characterisation was done with respect to the following features: + +- speed: speed of moving points; +- motion field: quantity of moving points; +- RGB texture: texture complexity of the RGB attributes; +- 3D geometry: complexity of the volumetric shapes; and +- surface occlusion: number of (dis)appearing points. + +Table IV summarizes the characteristics of the UVG-VPC sequences. They all have unique characteristics, i.e., no two sequences sharing the same set of features. Furthermore, some + +TABLE III. CHARACTERISTICS OF THE VOXELIZED POINT CLOUD SEQUENCES IN THE PROPOSED UVG-VPC DATASET + +
SnapshotName and description#pts/frame (10-bits)SnapshotName and description#pts/frame (10-bits)
Name: Blue Backpack +Description: a person takes a jacket out of a backpack and puts it on. +Specific features: unicolor clothes and accessories; the interaction with the accessories adds 3D geometry complexity and surface occlusions.#PTS +1.4 #10^5 +1.2 +0.8 +0.5 +Average: 1 051 399 +Min: 799 322 +Max: 1 302 904Name: BlueSpin +Description: a person dressed in blue is steadily spinning around. +Specific features: unicolor clothes; steady rotation about a fixed axis.7.5 #10^5 +7.7 +6.5 +0 +Frame ID +Average: 685 044 +Min: 679 347 +Max: 693 684
Name: BlueSquat +Description: a person dressed in blue is performing squats. +Specific features: unicolor clothes; intermittent surface occlusions due to body movements.#PTS +7.5 #10^5 +7.7 +6.5 +0 +Average: 718 167 +Min: 667 024 +Max: 741 177Name: CasualSpin +Description: a person wearing a striped shirt and jeans is steadily spinning around. +Specific features: textured top, unicolor bottom; steady rotation about a fixed axis.#PTS +6.2 +5.8 +0 +Frame ID +Average: 599 669 +Min: 588 394 +Max: 619 662
Name: CasualSquat +Description: a person wearing a striped shirt and jeans is performing squats. +Specific features: textured top; unicolor bottom; intermittent surface occlusions due to body movements.#PTS +6.2 +5.8 +0 +Average: 614 419 +Min: 602 150 +Max: 629 416Name: ElegantDance +Description: a person wearing a long black dress is dancing and twirling around. +Specific features: unicolor clothes; as the person moves and twirls, the dress is folding and shifting, creating a complex 3D geometry.#PTS +10 #10^5 +10 +0 +0 +0 +Average: 771 714 +Min: 579 917 +Max: 1 098 016
Name: ElegantWave +Description: a person wearing a long black dress greets by waving hand. +Specific features: unicolor clothes; only upper-body movement; no dress movement; simple 3D geometry.#PTS +10 #10^5 +5 +0 +Average: 661 405 +Min: 650 731 +Max: 674 383Name: FlowerDance +Description: a person wearing a long flower dress is dancing and twirling around. +Specific features: textured clothes; as the person moves and twirls, the dress is folding and shifting, creating a complex 3D geometry.#PTS +10 #10^5 +10 +0 +0 +0 +Average: 755 299 +Min: 650 961 +Max: 913 621
Name: FlowerWave +Description: a person wearing a long flower dress greets by waving hand. +Specific features: textured clothes; only upper-body movement; no dress movement; simple 3D geometry.#PTS +10 #10^5 +6 +0 +Average: 691 334 +Min: 659 972 +Max: 708 898Name: Gymnast +Description: a person stands on one leg and does a leg hold. +Specific features: unicolor clothes; leg movement creates surface occlusion.#PTS +5.5 #10^5 +5 +0 +0 +0 +Average: 523 078 +Min: 509 576 +Max: 551 604
Name: HelloGoodbye +Description: a person wearing a long black dress enters the scene, greets by waving hand, and leaves the scene. +Specific features: unicolor clothes; empty capture space at sequence start and end.#PTS +10 #10^5 +5 +0 +Average: 639 807 +Min: 133 276 +Max: 929 588Name: ReadyForWinter +Description: a person puts on a beanie and a scarf. +Specific features: unicolor clothes; textured scarf; interaction with accessories creates complex surface structures and surface occlusions.#PTS +7.0 #10^5 +7.0 +7.0 +0 +Frame ID +Average: 794 317 +Min: 730 913 +Max: 883 642
+ +TABLE IV. UVG-VPC DATASET CHARACTERIZATION + +
SequenceSpeedMotion fieldRGB texture3D geometrySurface occlusion
BlueBackpackFastMediumSimpleComplexPlenty of
BlueSpinMediumDenseSimpleSimpleLittle
BlueSquatFastDenseSimpleMediumMedium
CasualSpinMediumDenseMediumSimpleLittle
CasualSquatFastDenseMediumMediumMedium
ElegantDanceFastDenseSimpleComplexPlenty of
ElegantWaveSlowSparseSimpleSimpleLittle
FlowerDanceFastDenseComplexComplexPlenty of
FlowerWaveSlowSparseComplexSimpleLittle
GymnastMediumMediumSimpleSimpleMedium
HelloGoodbyeMediumMediumSimpleMediumPlenty of
ReadyForWinterMediumMediumMediumComplexPlenty of
+ +sequences were specifically designed to contrast with each other in terms of one or more of these criteria. + +# V. CONCLUSION + +This paper presented the UVG-VPC open dataset, which has been carefully designed to facilitate the development, evaluation, and validation of V-PCC coding technology. The dataset consists of 12 voxelized point cloud sequences and associated normals. We believe that the availability of the UVG-VPC dataset will enable researchers and practitioners to advance the state-of-the-art in point cloud compression and foster its deployment in immersive visual media applications. + +# ACKNOWLEDGMENT + +This work was carried out with the support of Centre for Immersive Visual Technologies (CIVIT) research infrastructure, Tampere University, Finland. In addition, the authors wish to acknowledge CSC - IT Center for Science, Finland, for computational and storage resources. + +# REFERENCES + +[1] ISO/IEC 23090-5:2021. "Information technology — coded representation of immersive media — part 5: visual volumetric video-based coding (V3C) and video-based point cloud compression (V-PCC)," Jun. 2021. +[2] D. Graziosi, et al., "An overview of ongoing point cloud compression standardization activities: video-based (V-PCC) and geometry-based (G-PCC)," APSIPA Trans. Signal Information Process., vol. 9, Apr. 2020. +[3] J. M. Boyce, et al., "MPEG immersive video coding standard," Proc. IEEE, vol. 109, no. 9, pp. 1521-1536, Sep. 2021. +[4] V. K. M. Vadakital, et al., “The MPEG immersive video standard—current status and future outlook,” IEEE MultiMedia, vol. 29, no. 3, pp. 101–111, Jul.–Sep. 2022. +[5] ISO/IEC JTC1/SC29/WG11, "Common test conditions for V3C and V-PCC," Document N19518, Online, Jul. 2020. +[6] I. Reimat, et al., "CWIPC-SXR: point cloud dynamic human dataset for social XR," in Proc. ACM Multimedia Sys. Conf., pp. 300-306, Istanbul, Turkey, Sep. 2021. +[7] R. Pagés, K. Amplanitis, J. Ondrej, E. Zerman, and A. 
Smolic, "Volograms & V-SENSE volumetric video dataset," Mar. 2022. +[8] E. d'Eon, B. Harrison, T. Myers, and P. A. Chou, "8i voxelized full bodies - a voxelized point cloud dataset," ISO/IEC JTC1/SC29 Joint WG11/WG1, Document WG11M40059/WG1M74006, Geneva, Switzerland, Jan. 2017. + +[9] Y. Xu, Y. Lu, and Z. Wen, "Owlii dynamic human mesh sequence dataset," ISO/IEC JTC1/SC29/WG11 Document M41658, Macau, China, Oct. 2017. +[10] M. Krivokuca, P. A. Chou, and P. Savill, "8i voxelized surface light field (8iVSLF) dataset," ISO/IEC JTC1/SC29 WG11, Document M42914, Ljubljana, Slovenia, Jul. 2018. +[11] UVG-VPC Licence, [Online], Available: https://ultravideo.fi/UVG-VPC/ licence.pdf, Accessed: May. 26, 2023. +[12] Mantis Vision Website, [Online], Available: https://mantis-vision.com/, Accessed: Apr. 26, 2023. +[13] E. W. Weisstein, “Triangle point picking,” [Online], Available: https://mathworld.wolfram.com/TrianglePointPicking.html, Accessed: Apr. 26, 2023. +[14] Q.-Y. Zhou, J. Park, and V. Koltun, "Open3D: a modern library for 3D data processing," arXiv:1801.09847, Jan. 2018. +[15] D. Tian, H. Ochimizu, C. Feng, R. Cohen, and A. Vetro, “Evaluation metrics for point cloud compression,” ISO/IEC JTC1/SC29/WG11, Document M39966, Geneva, Switzerland, Jan. 2017. +[16] D. Tian, H. Ochimizu, C. Feng, R. Cohen, and A. Vetro, “Updates and integration of evaluation metric software for PCC,” ISO/IEC JTC1/SC29/WG11, Document M40522, Hobart, Australia, Apr. 2017. +[17] M. Wien, J. Jung, and V. Baroncini, "Formal visual evaluation and study of objective metrics for MPEG dynamic mesh coding," in Proc. Eur. Workshop Vis. Inf. Process., Lisbon, Portugal, Sep. 2022. 
\ No newline at end of file diff --git a/data/2025/2504_05xxx/2504.05888/images/5024f55b9e1fd437fb2821dc70826e646043da398eed2e51b77c0ff590cd39a5.jpg b/data/2025/2504_05xxx/2504.05888/images/5024f55b9e1fd437fb2821dc70826e646043da398eed2e51b77c0ff590cd39a5.jpg new file mode 100644 index 0000000000000000000000000000000000000000..8dde3d330ad71e432a1ec686611f9a9e6bc5e388 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05888/images/5024f55b9e1fd437fb2821dc70826e646043da398eed2e51b77c0ff590cd39a5.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7f847d09470c40c3ff185ec1623c4b63fa3a1c161b87901fc13f35ad861413f9 +size 37392 diff --git a/data/2025/2504_05xxx/2504.05888/images/5be5397b2dddb8bf27fafc0d3ae7ba60ba325d54a8206b360cdde882b0effc85.jpg b/data/2025/2504_05xxx/2504.05888/images/5be5397b2dddb8bf27fafc0d3ae7ba60ba325d54a8206b360cdde882b0effc85.jpg new file mode 100644 index 0000000000000000000000000000000000000000..1ec78186e8004ade2abf40d1291a61740bcfb885 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05888/images/5be5397b2dddb8bf27fafc0d3ae7ba60ba325d54a8206b360cdde882b0effc85.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2c667547c89347d356c5b9df72fd6f0dcd89592271ddb3fe05cd02baad47cf22 +size 85608 diff --git a/data/2025/2504_05xxx/2504.05888/images/75688d5d77774870bb82790d226ffd56321eb2120c7a0cf4564f1f6e2d72b4a4.jpg b/data/2025/2504_05xxx/2504.05888/images/75688d5d77774870bb82790d226ffd56321eb2120c7a0cf4564f1f6e2d72b4a4.jpg new file mode 100644 index 0000000000000000000000000000000000000000..03320f7ea0d96ecab02953d60ef08ead01d3f7f5 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05888/images/75688d5d77774870bb82790d226ffd56321eb2120c7a0cf4564f1f6e2d72b4a4.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e1b2efec9e3eb61c2c54c5fd5c68a4544518fe30876a6a8582c7ca473d0db1a4 +size 49053 diff --git 
a/data/2025/2504_05xxx/2504.05888/images/8bcf6a46d1ef70ca523d9e20c79aa14744e9c04566f6fdf3b294a8d3447a3d42.jpg b/data/2025/2504_05xxx/2504.05888/images/8bcf6a46d1ef70ca523d9e20c79aa14744e9c04566f6fdf3b294a8d3447a3d42.jpg new file mode 100644 index 0000000000000000000000000000000000000000..d855b47816b2342b5457b4d76917b3cdda9c9380 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05888/images/8bcf6a46d1ef70ca523d9e20c79aa14744e9c04566f6fdf3b294a8d3447a3d42.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a3bc805621ad258fe8cf7f2e3e5c9ff7b6a7f5a85bfcadabda61e5213d37d7ab +size 399548 diff --git a/data/2025/2504_05xxx/2504.05888/images/949f10347b71bcf7a724678c83278f617abc44cc6e4d50267a87348b633002f1.jpg b/data/2025/2504_05xxx/2504.05888/images/949f10347b71bcf7a724678c83278f617abc44cc6e4d50267a87348b633002f1.jpg new file mode 100644 index 0000000000000000000000000000000000000000..f295f21bd5748edcbd192a8a940cf905d08f5b16 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05888/images/949f10347b71bcf7a724678c83278f617abc44cc6e4d50267a87348b633002f1.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b7bdf36ecf96cb34b39124bfd8966f799779b149da7d2a5626ac02b27a7d258f +size 45480 diff --git a/data/2025/2504_05xxx/2504.05888/images/f242914588f033803c2e5584c7b701b20ec0e783d3b3edd51333f51b6f080b0a.jpg b/data/2025/2504_05xxx/2504.05888/images/f242914588f033803c2e5584c7b701b20ec0e783d3b3edd51333f51b6f080b0a.jpg new file mode 100644 index 0000000000000000000000000000000000000000..e75ae118edf83db87d5499edb2b5d013343a4d4b --- /dev/null +++ b/data/2025/2504_05xxx/2504.05888/images/f242914588f033803c2e5584c7b701b20ec0e783d3b3edd51333f51b6f080b0a.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:04e32df802b937926808d4c11f501f8292ce282b558c64e6111d0a4afd9363fb +size 29140 diff --git a/data/2025/2504_05xxx/2504.05888/layout.json b/data/2025/2504_05xxx/2504.05888/layout.json new file mode 100644 index 
0000000000000000000000000000000000000000..25cfe6ed9459cb7aeaa90abbe709ba0f26ff1b6f --- /dev/null +++ b/data/2025/2504_05xxx/2504.05888/layout.json @@ -0,0 +1,2815 @@ +{ + "pdf_info": [ + { + "para_blocks": [ + { + "bbox": [ + 88, + 27, + 505, + 83 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 88, + 27, + 505, + 83 + ], + "spans": [ + { + "bbox": [ + 88, + 27, + 505, + 83 + ], + "type": "text", + "content": "UVG-VPC: Voxelized Point Cloud Dataset for Visual Volumetric Video-based Coding" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 133, + 95, + 461, + 116 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 133, + 95, + 461, + 116 + ], + "spans": [ + { + "bbox": [ + 133, + 95, + 461, + 116 + ], + "type": "text", + "content": "Guillaume Gautier, Alexandre Mercat, Louis Fréneau, Mikko Pitkänen, and Jarno Vanne \nUltra Video Group, Tampere University, Tampere, Finland" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 132, + 116, + 461, + 126 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 116, + 461, + 126 + ], + "spans": [ + { + "bbox": [ + 132, + 116, + 461, + 126 + ], + "type": "text", + "content": "{guillaume.gautier, alexandre.mercat, louis.freneau, mikko.pitkanen, jarno.vanne} @tuni.fi" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 41, + 141, + 289, + 317 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 41, + 141, + 289, + 317 + ], + "spans": [ + { + "bbox": [ + 41, + 141, + 289, + 317 + ], + "type": "text", + "content": "Abstract- Point cloud compression has become a crucial factor in immersive visual media processing and streaming. This paper presents a new open dataset called UVG-VPC for the development, evaluation, and validation of MPEG Visual Volumetric Video-based Coding (V3C) technology. The dataset is distributed under its own non-commercial license. 
It consists of 12 point cloud test video sequences of diverse characteristics with respect to the motion, RGB texture, 3D geometry, and surface occlusion of the points. Each sequence is 10 seconds long and comprises 250 frames captured at 25 frames per second. The sequences are voxelized with a geometry precision of 9 to 12 bits, and the voxel color attributes are represented as 8-bit RGB values. The dataset also includes associated normals that make it more suitable for evaluating point cloud compression solutions. The main objective of releasing the UVG-VPC dataset is to foster the development of V3C technologies and thereby shape the future in this field." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 42, + 327, + 289, + 359 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 42, + 327, + 289, + 359 + ], + "spans": [ + { + "bbox": [ + 42, + 327, + 289, + 359 + ], + "type": "text", + "content": "Keywords—Open dataset, point cloud, Visual Volumetric Video-based Coding (V3C), Video-based Point Cloud Compression (V-PCC), Extended Reality (XR)" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 121, + 367, + 203, + 377 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 121, + 367, + 203, + 377 + ], + "spans": [ + { + "bbox": [ + 121, + 367, + 203, + 377 + ], + "type": "text", + "content": "I. INTRODUCTION" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 41, + 380, + 289, + 513 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 41, + 380, + 289, + 513 + ], + "spans": [ + { + "bbox": [ + 41, + 380, + 289, + 513 + ], + "type": "text", + "content": "Recent advances in volumetric visual media technologies have opened a plethora of opportunities for Extended Reality (XR). The state-of-the-art volumetric sensing and capturing technologies allow for the creation of detailed and immersive digital representations of the real world in three-dimensional (3D) space. 
In general, these representations can be represented as polygon meshes or point clouds that provide a realistic and detailed view of scenes from any viewpoint. Moreover, the natural and realistic viewing experience in XR is enhanced by 6 degrees of freedom (6DoF), which enables viewers to move around in the scene with both translational and rotational freedom and thereby expand the viewing space." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 41, + 518, + 289, + 661 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 41, + 518, + 289, + 661 + ], + "spans": [ + { + "bbox": [ + 41, + 518, + 289, + 661 + ], + "type": "text", + "content": "Economic storage and transmission of volumetric visual data require efficient compression technologies. To that end, the Motion Picture Experts Group (MPEG) has released the Visual Volumetric Video-based Coding (V3C) standards ISO/IEC 23090-5 [1] to compress dynamic volumetric scenes for XR applications, including gaming, sports broadcasting, and motion pictures. V3C can be used to compress various types of volumetric content, such as point clouds, immersive video with depth, and mesh representations of visual volumetric frames. For the time being, V3C includes two standards: Video-based Point Cloud Compression (V-PCC) [2], and MPEG immersive video (MIV) [3], [4], of which this paper focuses on the V-PCC standard." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 41, + 666, + 289, + 734 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 41, + 666, + 289, + 734 + ], + "spans": [ + { + "bbox": [ + 41, + 666, + 289, + 734 + ], + "type": "text", + "content": "Rate-distortion (RD) performance of video codes is typically evaluated with objective and subjective metrics, which involves a trade-off between coding efficiency and loss of information. 
Conducting these quality assessments comprehensively calls for representative datasets that cover a broad range of content (e.g., motion, texture, or occlusion)." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 302, + 141, + 550, + 185 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 141, + 550, + 185 + ], + "spans": [ + { + "bbox": [ + 302, + 141, + 550, + 185 + ], + "type": "text", + "content": "Given that V-PCC is commonly used for telecommunication and XR applications [4], the test set should be composed of voxelized point cloud full-body human subjects, as shown by the Common Test Conditions (CTC) for V-PCC [5]." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 302, + 190, + 551, + 268 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 190, + 551, + 268 + ], + "spans": [ + { + "bbox": [ + 302, + 190, + 551, + 268 + ], + "type": "text", + "content": "Table I lists the existing open point cloud datasets of full human body. The first two of them are not voxelized [6], [7], whereas the remaining three have limitations in geometry precision and size [8]–[10]. Hence, none of them is optimal for the development and evaluation of V-PCC tools, so there is an urgent need for high-quality datasets that contain real-world scenes with multifaceted content and motion." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 302, + 273, + 551, + 339 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 273, + 551, + 339 + ], + "spans": [ + { + "bbox": [ + 302, + 273, + 551, + 339 + ], + "type": "text", + "content": "This paper presents a new open dataset called UVG-VPC that is made up of 12 voxelized point cloud test sequences. 
Each sequence is " + }, + { + "bbox": [ + 302, + 273, + 551, + 339 + ], + "type": "inline_equation", + "content": "10\\mathrm{s}" + }, + { + "bbox": [ + 302, + 273, + 551, + 339 + ], + "type": "text", + "content": " in length, comprises 250 frames captured at a frame rate of 25 frame per second (fps), and has RGB attribute precision of 8 bits and a geometry precision of 9, 10, 11, and 12 bits. The dataset is available online at" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 361, + 342, + 492, + 354 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 361, + 342, + 492, + 354 + ], + "spans": [ + { + "bbox": [ + 361, + 342, + 492, + 354 + ], + "type": "text", + "content": "https://ultravideo.fi/UVG-VPC/" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 302, + 359, + 550, + 447 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 359, + 550, + 447 + ], + "spans": [ + { + "bbox": [ + 302, + 359, + 550, + 447 + ], + "type": "text", + "content": "It is released under its own non-commercial license [11]. Additionally, the associated normals are provided for all sequences. To the best of our knowledge, the proposed dataset is the first and only one that has been entirely designed for the development, evaluation, and validation of V-PCC coding technologies. The UVG-VPC dataset seeks to serve as a valuable resource for researchers and practitioners in the field of volumetric data compression and beyond." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 302, + 452, + 550, + 518 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 452, + 550, + 518 + ], + "spans": [ + { + "bbox": [ + 302, + 452, + 550, + 518 + ], + "type": "text", + "content": "The remainder of the paper is outlined as follows. Section II describes the volumetric capture studio setup used to obtain the needed data. Section III details the proposed workflow for voxelized point cloud generation. 
Section IV introduces our UVG-VPC dataset and discusses its characteristics. Finally, Section V concludes the paper." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 347, + 526, + 497, + 537 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 347, + 526, + 497, + 537 + ], + "spans": [ + { + "bbox": [ + 347, + 526, + 497, + 537 + ], + "type": "text", + "content": "II. VOLUMETRIC CAPTURE STUDIO" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 302, + 540, + 551, + 608 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 540, + 551, + 608 + ], + "spans": [ + { + "bbox": [ + 302, + 540, + 551, + 608 + ], + "type": "text", + "content": "The proposed dataset was captured with the volumetric capture studio developed by Mantis Vision [12]. Fig. 1 illustrates the studio setup that is composed of 32 (19 long and 13 short) camera units with different stereo distances. Each camera unit is composed of two RGB cameras, an IR projector, an IR (infrared) camera, and an Intel Next Unit of" + } + ] + } + ], + "index": 16 + }, + { + "type": "table", + "bbox": [ + 307, + 647, + 552, + 746 + ], + "blocks": [ + { + "bbox": [ + 307, + 637, + 551, + 646 + ], + "lines": [ + { + "bbox": [ + 307, + 637, + 551, + 646 + ], + "spans": [ + { + "bbox": [ + 307, + 637, + 551, + 646 + ], + "type": "text", + "content": "TABLE I. EXISTING OPEN POINT CLOUD DATASETS OF FULL HUMAN BODY" + } + ] + } + ], + "index": 17, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 307, + 647, + 552, + 746 + ], + "lines": [ + { + "bbox": [ + 307, + 647, + 552, + 746 + ], + "spans": [ + { + "bbox": [ + 307, + 647, + 552, + 746 + ], + "type": "table", + "html": "
RefDataset#seq.Fps#frames#camsVoxelized/ geometry precision
[6]CWIPC-SXR2130596–27687No
[7]Volograms & V-SENSE330149–183012/60No
[8]8iVFBv2*43030042Yes/10bits
[9]Owlii*430600-Yes/11bits
[10]8iVSLF7301, 30039Yes/12bits
OurUVG-VPC122525096Yes/9–12bits
", + "image_path": "949f10347b71bcf7a724678c83278f617abc44cc6e4d50267a87348b633002f1.jpg" + } + ] + } + ], + "index": 18, + "angle": 0, + "type": "table_body" + }, + { + "bbox": [ + 307, + 747, + 457, + 756 + ], + "lines": [ + { + "bbox": [ + 307, + 747, + 457, + 756 + ], + "spans": [ + { + "bbox": [ + 307, + 747, + 457, + 756 + ], + "type": "text", + "content": "*Partially included in the CTC for V-PCC [5]." + } + ] + } + ], + "index": 19, + "angle": 0, + "type": "table_footnote" + } + ], + "index": 18 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 41, + 743, + 287, + 768 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 41, + 743, + 287, + 768 + ], + "spans": [ + { + "bbox": [ + 41, + 743, + 287, + 768 + ], + "type": "text", + "content": "This work was supported in part by the XR Simulation and Presence at the Cloud Edge (XR-SPACE) project led by Nokia and funded by Business Finland, and the Academy of Finland (decision no. 349216)." + } + ] + } + ], + "index": 20 + } + ], + "page_size": [ + 595, + 841 + ], + "page_idx": 0 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 91, + 52, + 252, + 177 + ], + "blocks": [ + { + "bbox": [ + 91, + 52, + 252, + 177 + ], + "lines": [ + { + "bbox": [ + 91, + 52, + 252, + 177 + ], + "spans": [ + { + "bbox": [ + 91, + 52, + 252, + 177 + ], + "type": "image", + "image_path": "f242914588f033803c2e5584c7b701b20ec0e783d3b3edd51333f51b6f080b0a.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 42, + 179, + 151, + 190 + ], + "lines": [ + { + "bbox": [ + 42, + 179, + 151, + 190 + ], + "spans": [ + { + "bbox": [ + 42, + 179, + 151, + 190 + ], + "type": "text", + "content": "Fig. 1. Volumetric capture setup." 
+ } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "type": "table", + "bbox": [ + 44, + 211, + 287, + 312 + ], + "blocks": [ + { + "bbox": [ + 60, + 200, + 270, + 210 + ], + "lines": [ + { + "bbox": [ + 60, + 200, + 270, + 210 + ], + "spans": [ + { + "bbox": [ + 60, + 200, + 270, + 210 + ], + "type": "text", + "content": "TABLE II. SPECIFICATION OF SHORT AND LONG CAMERA UNITS" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 44, + 211, + 287, + 312 + ], + "lines": [ + { + "bbox": [ + 44, + 211, + 287, + 312 + ], + "spans": [ + { + "bbox": [ + 44, + 211, + 287, + 312 + ], + "type": "table", + "html": "
Camera unit typeLong (×19)Short (×13)
RGB camera ×2SpecificationUI-328xCP-CUI-308xCP-C
Resolution2456×20542456×2054
Stereo distance~30cm~10cm
IR cameraSpecificationUI-314xCP-M
Resolution640×512
Intel NUCProcessorIntel(R) Core(TM) i7-8665U CPU @ 1.90GHz
Memory32 GB
Hard driveSamsung 970 EVO Plus SSD 1Tb
", + "image_path": "75688d5d77774870bb82790d226ffd56321eb2120c7a0cf4564f1f6e2d72b4a4.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "table_body" + } + ], + "index": 3 + }, + { + "bbox": [ + 42, + 318, + 289, + 375 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 42, + 318, + 289, + 375 + ], + "spans": [ + { + "bbox": [ + 42, + 318, + 289, + 375 + ], + "type": "text", + "content": "Computing (NUC); their specifications are given in Table II. The camera units are connected through a tree topology using 10Gbps switches, with four camera units connected to each switch, and two switches connected to the render computer. The studio is able to capture volumetric video at up to 25 fps." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 42, + 379, + 289, + 468 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 42, + 379, + 289, + 468 + ], + "spans": [ + { + "bbox": [ + 42, + 379, + 289, + 468 + ], + "type": "text", + "content": "The studio features 40 LED tubes of " + }, + { + "bbox": [ + 42, + 379, + 289, + 468 + ], + "type": "inline_equation", + "content": "50\\mathrm{W}" + }, + { + "bbox": [ + 42, + 379, + 289, + 468 + ], + "type": "text", + "content": " and a Sync LED that flashes and triggers at the recording frame rate. Cameras capture images at slightly different times to avoid IR interference, with opposite cameras exposing at the same time. 
The studio is set up to a height of " + }, + { + "bbox": [ + 42, + 379, + 289, + 468 + ], + "type": "inline_equation", + "content": "2.5\\mathrm{m}" + }, + { + "bbox": [ + 42, + 379, + 289, + 468 + ], + "type": "text", + "content": ", with a diameter of " + }, + { + "bbox": [ + 42, + 379, + 289, + 468 + ], + "type": "inline_equation", + "content": "3\\mathrm{m}" + }, + { + "bbox": [ + 42, + 379, + 289, + 468 + ], + "type": "text", + "content": ", and it allows scanning of a scene with a height of " + }, + { + "bbox": [ + 42, + 379, + 289, + 468 + ], + "type": "inline_equation", + "content": "2.2\\mathrm{m}" + }, + { + "bbox": [ + 42, + 379, + 289, + 468 + ], + "type": "text", + "content": " and a diameter of " + }, + { + "bbox": [ + 42, + 379, + 289, + 468 + ], + "type": "inline_equation", + "content": "1.6\\mathrm{m}" + }, + { + "bbox": [ + 42, + 379, + 289, + 468 + ], + "type": "text", + "content": ". To enhance the capture quality of faces, most cameras are located on the upper part of the body." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 66, + 475, + 252, + 486 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 66, + 475, + 252, + 486 + ], + "spans": [ + { + "bbox": [ + 66, + 475, + 252, + 486 + ], + "type": "text", + "content": "III. VOXELIZED POINT CLOUD GENERATION" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 42, + 490, + 289, + 556 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 42, + 490, + 289, + 556 + ], + "spans": [ + { + "bbox": [ + 42, + 490, + 289, + 556 + ], + "type": "text", + "content": "Fig. 2 depicts the five steps needed to generate the proposed voxelized point cloud dataset. The first two steps are processed by the off-the-shelf equipment of the volumetric capture studio. The remaining three steps are designed in this work to make our sequences matching the format of the sequences from CTC for V-PCC [5]." 
+ } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 42, + 561, + 289, + 754 + ], + "type": "list", + "angle": 0, + "index": 11, + "blocks": [ + { + "bbox": [ + 43, + 561, + 289, + 638 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 43, + 561, + 289, + 638 + ], + "spans": [ + { + "bbox": [ + 43, + 561, + 289, + 638 + ], + "type": "text", + "content": "1) Point cloud acquisition is executed by the camera units and the render computer. The camera units capture both RGB and IR data, which are fused by the NUCs into a point cloud structure. Both camera data and generated point clouds are sent over the network to the render computer that merges them into a single Raw Merged Point Cloud." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 42, + 640, + 289, + 685 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 42, + 640, + 289, + 685 + ], + "spans": [ + { + "bbox": [ + 42, + 640, + 289, + 685 + ], + "type": "text", + "content": "2) Mesh generation is used to create a Mesh from the Raw Merged Point Cloud with off-the-shelf Poisson surface reconstruction algorithm provided with the volumetric capture studio." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 42, + 687, + 289, + 754 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 42, + 687, + 289, + 754 + ], + "spans": [ + { + "bbox": [ + 42, + 687, + 289, + 754 + ], + "type": "text", + "content": "3) Mesh sampling deploys triangle point picking from the trimesh Python library [13] to generate a Sampled Mesh Point Cloud. After studying the trade-off between having a sufficient number of points for the following voxelization process and a reasonable memory footprint, the number of sampled points was fixed to 10 million." 
+ } + ] + } + ], + "index": 10 + } + ], + "sub_type": "text" + }, + { + "type": "image", + "bbox": [ + 305, + 52, + 545, + 169 + ], + "blocks": [ + { + "bbox": [ + 305, + 52, + 545, + 169 + ], + "lines": [ + { + "bbox": [ + 305, + 52, + 545, + 169 + ], + "spans": [ + { + "bbox": [ + 305, + 52, + 545, + 169 + ], + "type": "image", + "image_path": "5024f55b9e1fd437fb2821dc70826e646043da398eed2e51b77c0ff590cd39a5.jpg" + } + ] + } + ], + "index": 12, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 304, + 169, + 507, + 179 + ], + "lines": [ + { + "bbox": [ + 304, + 169, + 507, + 179 + ], + "spans": [ + { + "bbox": [ + 304, + 169, + 507, + 179 + ], + "type": "text", + "content": "Fig. 2. Overview of the UVG-VPC dataset generation process." + } + ] + } + ], + "index": 13, + "angle": 0, + "type": "image_caption" + } + ], + "index": 12 + }, + { + "bbox": [ + 303, + 187, + 550, + 375 + ], + "type": "list", + "angle": 0, + "index": 16, + "blocks": [ + { + "bbox": [ + 303, + 187, + 550, + 297 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 303, + 187, + 550, + 297 + ], + "spans": [ + { + "bbox": [ + 303, + 187, + 550, + 297 + ], + "type": "text", + "content": "4) Voxelization is the process of applying point cloud data on a regular 3D grid structure, where each cell or voxel represents the presence or absence of points within its boundaries. When multiple points are involved, color attributes are averaged. For this step, the voxel size is computed as the maximum dimension of capture system bounding box across all dimensions divided by " + }, + { + "bbox": [ + 303, + 187, + 550, + 297 + ], + "type": "inline_equation", + "content": "2^{N}" + }, + { + "bbox": [ + 303, + 187, + 550, + 297 + ], + "type": "text", + "content": ", where " + }, + { + "bbox": [ + 303, + 187, + 550, + 297 + ], + "type": "inline_equation", + "content": "N" + }, + { + "bbox": [ + 303, + 187, + 550, + 297 + ], + "type": "text", + "content": " is the geometry precision. 
The UVG-VPC dataset includes Voxelized Point Clouds with a geometry precision of 9, 10, 11, and 12 bits." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 303, + 300, + 550, + 375 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 303, + 300, + 550, + 375 + ], + "spans": [ + { + "bbox": [ + 303, + 300, + 550, + 375 + ], + "type": "text", + "content": "5) Normal generation computes the Normals for each Voxelized Point Cloud in the UVG-VPC dataset using open3D Python library [14] and a Knn normal estimation with 12 neighbours [15], [16]. These Normals are used in the CTC for V-PCC to calculate the quality metric known as D2 [5]. Providing the Normals enables fair comparisons between solutions." + } + ] + } + ], + "index": 15 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 302, + 379, + 550, + 445 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 379, + 550, + 445 + ], + "spans": [ + { + "bbox": [ + 302, + 379, + 550, + 445 + ], + "type": "text", + "content": "In addition to the UVG-VPC dataset, we provide open access to the intermediate data used to create the sequences. Scientific community is free to use it in voxelized point cloud generation with varying geometry precision, as well as in other areas of interest such as mesh generation or dynamic mesh compression [17]." + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 364, + 453, + 476, + 464 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 364, + 453, + 476, + 464 + ], + "spans": [ + { + "bbox": [ + 364, + 453, + 476, + 464 + ], + "type": "text", + "content": "IV. 
UVG-VPC DATASET" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 302, + 468, + 550, + 513 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 468, + 550, + 513 + ], + "spans": [ + { + "bbox": [ + 302, + 468, + 550, + 513 + ], + "type": "text", + "content": "The proposed UVG-VPC dataset consists of 12 sequences, each " + }, + { + "bbox": [ + 302, + 468, + 550, + 513 + ], + "type": "inline_equation", + "content": "10\\mathrm{~s}" + }, + { + "bbox": [ + 302, + 468, + 550, + 513 + ], + "type": "text", + "content": " long and composed of 250 frames captured at 25 fps. For each sequence, point cloud voxelized at 9, 10, 11, and 12 bits are provided with their associated normals." + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 302, + 517, + 550, + 584 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 517, + 550, + 584 + ], + "spans": [ + { + "bbox": [ + 302, + 517, + 550, + 584 + ], + "type": "text", + "content": "Table III lists the UVG-VPC sequences alphabetically and characterizes them with snapshots, names, content descriptions, and specific features. There is also a graph for each sequence that shows the distribution of points per frame for a geometry precision of 10 bits, as well as the corresponding average, minimum, and maximum values." + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 302, + 589, + 550, + 645 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 589, + 550, + 645 + ], + "spans": [ + { + "bbox": [ + 302, + 589, + 550, + 645 + ], + "type": "text", + "content": "The features of the sequences were carefully selected to make them challenging for various compression algorithms and ensure that the dataset is representative of real-world scenarios. 
In particular, the characterisation was done with respect to the following features:" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 304, + 648, + 535, + 718 + ], + "type": "list", + "angle": 0, + "index": 27, + "blocks": [ + { + "bbox": [ + 304, + 648, + 438, + 659 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 648, + 438, + 659 + ], + "spans": [ + { + "bbox": [ + 304, + 648, + 438, + 659 + ], + "type": "text", + "content": "- speed: speed of moving points;" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 304, + 662, + 474, + 674 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 662, + 474, + 674 + ], + "spans": [ + { + "bbox": [ + 304, + 662, + 474, + 674 + ], + "type": "text", + "content": "- motion field: quantity of moving points;" + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 304, + 677, + 535, + 688 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 677, + 535, + 688 + ], + "spans": [ + { + "bbox": [ + 304, + 677, + 535, + 688 + ], + "type": "text", + "content": "- RGB texture: texture complexity of the RGB attributes;" + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 304, + 692, + 535, + 703 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 692, + 535, + 703 + ], + "spans": [ + { + "bbox": [ + 304, + 692, + 535, + 703 + ], + "type": "text", + "content": "- 3D geometry: complexity of the volumetric shapes; and" + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 304, + 706, + 518, + 718 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 706, + 518, + 718 + ], + "spans": [ + { + "bbox": [ + 304, + 706, + 518, + 718 + ], + "type": "text", + "content": "- surface occlusion: number of (dis)appearing points." 
+ } + ] + } + ], + "index": 26 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 302, + 719, + 550, + 754 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 719, + 550, + 754 + ], + "spans": [ + { + "bbox": [ + 302, + 719, + 550, + 754 + ], + "type": "text", + "content": "Table IV summarizes the characteristics of the UVG-VPC sequences. They all have unique characteristics, i.e., no two sequences sharing the same set of features. Furthermore, some" + } + ] + } + ], + "index": 28 + } + ], + "discarded_blocks": [], + "page_size": [ + 595, + 841 + ], + "page_idx": 1 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 46, + 63, + 547, + 550 + ], + "blocks": [ + { + "bbox": [ + 105, + 52, + 486, + 61 + ], + "lines": [ + { + "bbox": [ + 105, + 52, + 486, + 61 + ], + "spans": [ + { + "bbox": [ + 105, + 52, + 486, + 61 + ], + "type": "text", + "content": "TABLE III. CHARACTERISTICS OF THE VOXELIZED POINT CLOUD SEQUENCES IN THE PROPOSED UVG-VPC DATASET" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 46, + 63, + 547, + 550 + ], + "lines": [ + { + "bbox": [ + 46, + 63, + 547, + 550 + ], + "spans": [ + { + "bbox": [ + 46, + 63, + 547, + 550 + ], + "type": "table", + "html": "
SnapshotName and description#pts/frame (10-bits)SnapshotName and description#pts/frame (10-bits)
Name: Blue Backpack\nDescription: a person takes a jacket out of a backpack and puts it on.\nSpecific features: unicolor clothes and accessories; the interaction with the accessories adds 3D geometry complexity and surface occlusions.#PTS\n1.4 #10^5\n1.2\n0.8\n0.5\nAverage: 1 051 399\nMin: 799 322\nMax: 1 302 904Name: BlueSpin\nDescription: a person dressed in blue is steadily spinning around.\nSpecific features: unicolor clothes; steady rotation about a fixed axis.7.5 #10^5\n7.7\n6.5\n0\nFrame ID\nAverage: 685 044\nMin: 679 347\nMax: 693 684
Name: BlueSquat\nDescription: a person dressed in blue is performing squats.\nSpecific features: unicolor clothes; intermittent surface occlusions due to body movements.#PTS\n7.5 #10^5\n7.7\n6.5\n0\nAverage: 718 167\nMin: 667 024\nMax: 741 177Name: CasualSpin\nDescription: a person wearing a striped shirt and jeans is steadily spinning around.\nSpecific features: textured top, unicolor bottom; steady rotation about a fixed axis.#PTS\n6.2\n5.8\n0\nFrame ID\nAverage: 599 669\nMin: 588 394\nMax: 619 662
Name: CasualSquat\nDescription: a person wearing a striped shirt and jeans is performing squats.\nSpecific features: textured top; unicolor bottom; intermittent surface occlusions due to body movements.#PTS\n6.2\n5.8\n0\nAverage: 614 419\nMin: 602 150\nMax: 629 416Name: ElegantDance\nDescription: a person wearing a long black dress is dancing and twirling around.\nSpecific features: unicolor clothes; as the person moves and twirls, the dress is folding and shifting, creating a complex 3D geometry.#PTS\n10 #10^5\n10\n0\n0\n0\nAverage: 771 714\nMin: 579 917\nMax: 1 098 016
Name: ElegantWave\nDescription: a person wearing a long black dress greets by waving hand.\nSpecific features: unicolor clothes; only upper-body movement; no dress movement; simple 3D geometry.#PTS\n10 #10^5\n5\n0\nAverage: 661 405\nMin: 650 731\nMax: 674 383Name: FlowerDance\nDescription: a person wearing a long flower dress is dancing and twirling around.\nSpecific features: textured clothes; as the person moves and twirls, the dress is folding and shifting, creating a complex 3D geometry.#PTS\n10 #10^5\n10\n0\n0\n0\nAverage: 755 299\nMin: 650 961\nMax: 913 621
Name: FlowerWave\nDescription: a person wearing a long flower dress greets by waving hand.\nSpecific features: textured clothes; only upper-body movement; no dress movement; simple 3D geometry.#PTS\n10 #10^5\n6\n0\nAverage: 691 334\nMin: 659 972\nMax: 708 898Name: Gymnast\nDescription: a person stands on one leg and does a leg hold.\nSpecific features: unicolor clothes; leg movement creates surface occlusion.#PTS\n5.5 #10^5\n5\n0\n0\n0\nAverage: 523 078\nMin: 509 576\nMax: 551 604
Name: HelloGoodbye\nDescription: a person wearing a long black dress enters the scene, greets by waving hand, and leaves the scene.\nSpecific features: unicolor clothes; empty capture space at sequence start and end.#PTS\n10 #10^5\n5\n0\nAverage: 639 807\nMin: 133 276\nMax: 929 588Name: ReadyForWinter\nDescription: a person puts on a beanie and a scarf.\nSpecific features: unicolor clothes; textured scarf; interaction with accessories creates complex surface structures and surface occlusions.#PTS\n7.0 #10^5\n7.0\n7.0\n0\nFrame ID\nAverage: 794 317\nMin: 730 913\nMax: 883 642
", + "image_path": "8bcf6a46d1ef70ca523d9e20c79aa14744e9c04566f6fdf3b294a8d3447a3d42.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "table_body" + } + ], + "index": 1 + }, + { + "type": "table", + "bbox": [ + 44, + 568, + 290, + 708 + ], + "blocks": [ + { + "bbox": [ + 77, + 557, + 255, + 565 + ], + "lines": [ + { + "bbox": [ + 77, + 557, + 255, + 565 + ], + "spans": [ + { + "bbox": [ + 77, + 557, + 255, + 565 + ], + "type": "text", + "content": "TABLE IV. UVG-VPC DATASET CHARACTERIZATION" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 44, + 568, + 290, + 708 + ], + "lines": [ + { + "bbox": [ + 44, + 568, + 290, + 708 + ], + "spans": [ + { + "bbox": [ + 44, + 568, + 290, + 708 + ], + "type": "table", + "html": "
SequenceSpeedMotion fieldRGB texture3D geometrySurface occlusion
BlueBackpackFastMediumSimpleComplexPlenty of
BlueSpinMediumDenseSimpleSimpleLittle
BlueSquatFastDenseSimpleMediumMedium
CasualSpinMediumDenseMediumSimpleLittle
CasualSquatFastDenseMediumMediumMedium
ElegantDanceFastDenseSimpleComplexPlenty of
ElegantWaveSlowSparseSimpleSimpleLittle
FlowerDanceFastDenseComplexComplexPlenty of
FlowerWaveSlowSparseComplexSimpleLittle
GymnastMediumMediumSimpleSimpleMedium
HelloGoodbyeMediumMediumSimpleMediumPlenty of
ReadyForWinterMediumMediumMediumComplexPlenty of
", + "image_path": "5be5397b2dddb8bf27fafc0d3ae7ba60ba325d54a8206b360cdde882b0effc85.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "table_body" + } + ], + "index": 3 + }, + { + "bbox": [ + 42, + 713, + 289, + 734 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 42, + 713, + 289, + 734 + ], + "spans": [ + { + "bbox": [ + 42, + 713, + 289, + 734 + ], + "type": "text", + "content": "sequences were specifically designed to contrast with each other in terms of one or more of these criteria." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 384, + 556, + 460, + 565 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 384, + 556, + 460, + 565 + ], + "spans": [ + { + "bbox": [ + 384, + 556, + 460, + 565 + ], + "type": "text", + "content": "V. CONCLUSION" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 302, + 571, + 550, + 660 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 571, + 550, + 660 + ], + "spans": [ + { + "bbox": [ + 302, + 571, + 550, + 660 + ], + "type": "text", + "content": "This paper presented the UVG-VPC open dataset, which has been carefully designed to facilitate the development, evaluation, and validation of V-PCC coding technology. The dataset consists of 12 voxelized point cloud sequences and associated normals. We believe that the availability of the UVG-VPC dataset will enable researchers and practitioners to advance the state-of-the-art in point cloud compression and foster its deployment in immersive visual media applications." 
+ } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 385, + 668, + 469, + 677 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 385, + 668, + 469, + 677 + ], + "spans": [ + { + "bbox": [ + 385, + 668, + 469, + 677 + ], + "type": "text", + "content": "ACKNOWLEDGMENT" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 302, + 682, + 550, + 737 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 682, + 550, + 737 + ], + "spans": [ + { + "bbox": [ + 302, + 682, + 550, + 737 + ], + "type": "text", + "content": "This work was carried out with the support of Centre for Immersive Visual Technologies (CIVIT) research infrastructure, Tampere University, Finland. In addition, the authors wish to acknowledge CSC - IT Center for Science, Finland, for computational and storage resources." + } + ] + } + ], + "index": 8 + } + ], + "discarded_blocks": [], + "page_size": [ + 595, + 841 + ], + "page_idx": 2 + }, + { + "para_blocks": [ + { + "bbox": [ + 139, + 54, + 192, + 64 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 139, + 54, + 192, + 64 + ], + "spans": [ + { + "bbox": [ + 139, + 54, + 192, + 64 + ], + "type": "text", + "content": "REFERENCES" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 44, + 68, + 289, + 290 + ], + "type": "list", + "angle": 0, + "index": 9, + "blocks": [ + { + "bbox": [ + 44, + 68, + 289, + 105 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 44, + 68, + 289, + 105 + ], + "spans": [ + { + "bbox": [ + 44, + 68, + 289, + 105 + ], + "type": "text", + "content": "[1] ISO/IEC 23090-5:2021. \"Information technology — coded representation of immersive media — part 5: visual volumetric video-based coding (V3C) and video-based point cloud compression (V-PCC),\" Jun. 2021." 
+ } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 44, + 106, + 289, + 142 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 44, + 106, + 289, + 142 + ], + "spans": [ + { + "bbox": [ + 44, + 106, + 289, + 142 + ], + "type": "text", + "content": "[2] D. Graziosi, et al., \"An overview of ongoing point cloud compression standardization activities: video-based (V-PCC) and geometry-based (G-PCC),\" APSIPA Trans. Signal Information Process., vol. 9, Apr. 2020." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 44, + 142, + 288, + 161 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 44, + 142, + 288, + 161 + ], + "spans": [ + { + "bbox": [ + 44, + 142, + 288, + 161 + ], + "type": "text", + "content": "[3] J. M. Boyce, et al., \"MPEG immersive video coding standard,\" Proc. IEEE, vol. 109, no. 9, pp. 1521-1536, Sep. 2021." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 44, + 161, + 289, + 188 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 44, + 161, + 289, + 188 + ], + "spans": [ + { + "bbox": [ + 44, + 161, + 289, + 188 + ], + "type": "text", + "content": "[4] V. K. M. Vadakital, et al., “The MPEG immersive video standard—current status and future outlook,” IEEE MultiMedia, vol. 29, no. 3, pp. 101–111, Jul.–Sep. 2022." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 44, + 189, + 289, + 206 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 44, + 189, + 289, + 206 + ], + "spans": [ + { + "bbox": [ + 44, + 189, + 289, + 206 + ], + "type": "text", + "content": "[5] ISO/IEC JTC1/SC29/WG11, \"Common test conditions for V3C and V-PCC,\" Document N19518, Online, Jul. 2020." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 44, + 207, + 289, + 234 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 44, + 207, + 289, + 234 + ], + "spans": [ + { + "bbox": [ + 44, + 207, + 289, + 234 + ], + "type": "text", + "content": "[6] I. 
Reimat, et al., \"CWIPC-SXR: point cloud dynamic human dataset for social XR,\" in Proc. ACM Multimedia Sys. Conf., pp. 300-306, Istanbul, Turkey, Sep. 2021." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 44, + 235, + 289, + 252 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 44, + 235, + 289, + 252 + ], + "spans": [ + { + "bbox": [ + 44, + 235, + 289, + 252 + ], + "type": "text", + "content": "[7] R. Pagés, K. Amplanitis, J. Ondrej, E. Zerman, and A. Smolic, \"Volograms & V-SENSE volumetric video dataset,\" Mar. 2022." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 44, + 253, + 289, + 290 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 44, + 253, + 289, + 290 + ], + "spans": [ + { + "bbox": [ + 44, + 253, + 289, + 290 + ], + "type": "text", + "content": "[8] E. d'Eon, B. Harrison, T. Myers, and P. A. Chou, \"8i voxelized full bodies - a voxelized point cloud dataset,\" ISO/IEC JTC1/SC29 Joint WG11/WG1, Document WG11M40059/WG1M74006, Geneva, Switzerland, Jan. 2017." + } + ] + } + ], + "index": 8 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 305, + 53, + 550, + 275 + ], + "type": "list", + "angle": 0, + "index": 19, + "blocks": [ + { + "bbox": [ + 305, + 53, + 550, + 80 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 53, + 550, + 80 + ], + "spans": [ + { + "bbox": [ + 305, + 53, + 550, + 80 + ], + "type": "text", + "content": "[9] Y. Xu, Y. Lu, and Z. Wen, \"Owlii dynamic human mesh sequence dataset,\" ISO/IEC JTC1/SC29/WG11 Document M41658, Macau, China, Oct. 2017." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 305, + 82, + 550, + 108 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 82, + 550, + 108 + ], + "spans": [ + { + "bbox": [ + 305, + 82, + 550, + 108 + ], + "type": "text", + "content": "[10] M. Krivokuca, P. A. Chou, and P. 
Savill, \"8i voxelized surface light field (8iVSLF) dataset,\" ISO/IEC JTC1/SC29 WG11, Document M42914, Ljubljana, Slovenia, Jul. 2018." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 305, + 109, + 550, + 126 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 109, + 550, + 126 + ], + "spans": [ + { + "bbox": [ + 305, + 109, + 550, + 126 + ], + "type": "text", + "content": "[11] UVG-VPC Licence, [Online], Available: https://ultravideo.fi/UVG-VPC/ licence.pdf, Accessed: May. 26, 2023." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 305, + 127, + 550, + 145 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 127, + 550, + 145 + ], + "spans": [ + { + "bbox": [ + 305, + 127, + 550, + 145 + ], + "type": "text", + "content": "[12] Mantis Vision Website, [Online], Available: https://mantis-vision.com/, Accessed: Apr. 26, 2023." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 305, + 146, + 550, + 173 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 146, + 550, + 173 + ], + "spans": [ + { + "bbox": [ + 305, + 146, + 550, + 173 + ], + "type": "text", + "content": "[13] E. W. Weisstein, “Triangle point picking,” [Online], Available: https://mathworld.wolfram.com/TrianglePointPicking.html, Accessed: Apr. 26, 2023." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 305, + 174, + 550, + 191 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 174, + 550, + 191 + ], + "spans": [ + { + "bbox": [ + 305, + 174, + 550, + 191 + ], + "type": "text", + "content": "[14] Q.-Y. Zhou, J. Park, and V. Koltun, \"Open3D: a modern library for 3D data processing,\" arXiv:1801.09847, Jan. 2018." 
+ } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 305, + 192, + 550, + 218 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 192, + 550, + 218 + ], + "spans": [ + { + "bbox": [ + 305, + 192, + 550, + 218 + ], + "type": "text", + "content": "[15] D. Tian, H. Ochimizu, C. Feng, R. Cohen, and A. Vetro, “Evaluation metrics for point cloud compression,” ISO/IEC JTC1/SC29/WG11, Document M39966, Geneva, Switzerland, Jan. 2017." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 305, + 219, + 550, + 246 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 219, + 550, + 246 + ], + "spans": [ + { + "bbox": [ + 305, + 219, + 550, + 246 + ], + "type": "text", + "content": "[16] D. Tian, H. Ochimizu, C. Feng, R. Cohen, and A. Vetro, “Updates and integration of evaluation metric software for PCC,” ISO/IEC JTC1/SC29/WG11, Document M40522, Hobart, Australia, Apr. 2017." + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 305, + 247, + 550, + 275 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 247, + 550, + 275 + ], + "spans": [ + { + "bbox": [ + 305, + 247, + 550, + 275 + ], + "type": "text", + "content": "[17] M. Wien, J. Jung, and V. Baroncini, \"Formal visual evaluation and study of objective metrics for MPEG dynamic mesh coding,\" in Proc. Eur. Workshop Vis. Inf. Process., Lisbon, Portugal, Sep. 2022." 
+ } + ] + } + ], + "index": 18 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [], + "page_size": [ + 595, + 841 + ], + "page_idx": 3 + } + ], + "_backend": "vlm", + "_version_name": "2.6.4" +} \ No newline at end of file diff --git a/data/2025/2504_06xxx/2504.06122/1f4b320c-f80c-4d99-b14d-3a49e20634f9_content_list.json b/data/2025/2504_06xxx/2504.06122/1f4b320c-f80c-4d99-b14d-3a49e20634f9_content_list.json new file mode 100644 index 0000000000000000000000000000000000000000..7687ce21806e381bcaa308151fcc9c4380c3311d --- /dev/null +++ b/data/2025/2504_06xxx/2504.06122/1f4b320c-f80c-4d99-b14d-3a49e20634f9_content_list.json @@ -0,0 +1,2276 @@ +[ + { + "type": "text", + "text": "Leanabell-Prover: Posttraining Scaling in Formal Reasoning", + "text_level": 1, + "bbox": [ + 129, + 133, + 868, + 158 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Jingyuan Zhang, Qi Wang, Xingguang Ji, Yahui Liu, Yang Yue, Fuzheng Zhang, Di Zhang, Guorui Zhou, Kun Gai", + "bbox": [ + 220, + 187, + 776, + 223 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Kuaishou Technology", + "bbox": [ + 406, + 237, + 591, + 254 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Abstract", + "text_level": 1, + "bbox": [ + 448, + 296, + 549, + 313 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Recent advances in automated theorem proving (ATP) through LLMs have highlighted the potential of formal reasoning with Lean 4 codes. However, ATP has not yet been revolutionized by the recent posttraining scaling as demonstrated by Open AI O1/O3 and Deepseek R1. In this work, we investigate the entire posttraining of ATP, aiming to align it with breakthroughs in reasoning models in natural languages. To begin, we continual train current ATP models with a hybrid dataset, which consists of numerous statement-proof pairs, and additional data aimed at incorporating cognitive behaviors that emulate human reasoning and hypothesis refinement. 
Next, we explore reinforcement learning with the use of outcome reward returned by Lean 4 compiler. Through our designed continual training and reinforcement learning processes, we have successfully improved existing formal provers, including both DeepSeek-Prover-v1.5 and Goedel-Prover, achieving state-of-the-art performance in the field of whole-proof generation. For example, we achieve a $59.8\\%$ pass rate (pass@32) on MiniF2F. This is an on-going project and we will progressively update our findings, release our data and training details.", + "bbox": [ + 110, + 338, + 885, + 561 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/d54d3dc99f0f5608177788c79a253bd2e0a99094c8f633e30787e913f16ecf88.jpg", + "image_caption": [ + "Figure 1 | Benchmark performance on MiniF2F-test (Zheng et al., 2021). Our method boosts both the two baseline models after employing RL training. Goedel-Prover-RL is our implementation. Our framework surpasses DeepSeek-Prover-v1.5-RL and Goedel-Prover-SFT $6.6\\%$ and $2.2\\%$ , respectively." + ], + "image_footnote": [], + "bbox": [ + 235, + 579, + 759, + 785 + ], + "page_idx": 0 + }, + { + "type": "aside_text", + "text": "arXiv:2504.06122v3 [cs.AI] 14 Jul 2025", + "bbox": [ + 21, + 297, + 60, + 701 + ], + "page_idx": 0 + }, + { + "type": "footer", + "text": "$^{\\text{念}}$ Equal contributions, and order alphabetically by first name. ${}^{\\dagger}$ Corresponding author.", + "bbox": [ + 112, + 920, + 811, + 939 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "1. Introduction", + "text_level": 1, + "bbox": [ + 115, + 99, + 272, + 116 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Recent large language models (LLMs), such as Open AI O1/O3 and Deepseek R1, which are enhanced by posttraining scaling, emerge with numerous powerful and intriguing reasoning behaviors (Guo et al., 2025; Anthropic, 2025; Team, 2025). Such LLMs have shown impressive performance in solving math problems with natural language. 
However, the long chain-of-thoughts (CoTs) and final answers in natural language (NL) are substantially challenging for peer review (Wang et al., 2024), especially for theorem proving. Meanwhile, the key advantage of formal languages lies in their verifiability—each reasoning step can be validated by formal theorem verifiers, e.g., Lean (De Moura et al., 2015; Moura and Ullrich, 2021) and Isabelle (Paulson, 1994). As a promising direction, automated theorem proving (ATP) with formal languages (FL) has attracted booming attention from the community of large language models (LLMs).", + "bbox": [ + 115, + 131, + 884, + 292 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Contrary to solving math problems with natural language, generating proofs using Lean 4 codes (or other formal languages) is more challenging. For example, DeepSeek-Prover v1.5-RL (Xin et al., 2024) achieves only $50.0\\%$ (pass@32) on the Olympiad-level mathematics benchmark MiniF2F (Zheng et al., 2021). However, DeepSeek-R1 (Guo et al., 2025) can achieve $100\\%$ on the same math problems, but in natural language. DeepSeek-Prover-v1.5 (Lin et al., 2025) and STP (Dong and Ma, 2025) show that using extensive synthetic dataset of formal statements and expert iteration (Polu et al., 2022) can boost the whole-proof prover. Meanwhile, some methods (Yang et al., 2023; Wu et al., 2024; Xin et al., 2025) scale up the search budget (e.g., more than 2 million in BFS-Prover (Xin et al., 2025)) for step-wise tactic generation, which seems extremely computational.", + "bbox": [ + 115, + 300, + 884, + 460 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Although RL strategies have already proven their effectiveness in natural language for math problem solving, the performance in formal reasoning has been rather ordinary so far. We find that only Deepseek-Prover releases it RL version, and DeepSeek-Prover-v1.5-RL marginally improves $1.8\\%$ than its supervised fine-tuned model. 
Compared to the success of reinforcement learning (RL) in natural language reasoning, the potential for improvement in formal language reasoning may still be vast. However, replicating the current successful RL training approaches, which primarily focus on the Qwen2.5 model series, is not straightforward.", + "bbox": [ + 115, + 469, + 884, + 581 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "To verify the posttraining scaling in ATP, we begin with the standard whole-proof generation models DeepSeek-Prover-v1.5-SFT and Goedel-Prover-SFT. There are three training stages in our optimization framework. We first collect public and synthetic data to continue training. We also utilize automatic synthetic CoT data to embed the self-reflection capabilities, such as backtracking (abandoning failing approaches) and verification (systematic error-checking), to the fine-tuned model. Next, we employ the GRPO algorithm (Shao et al., 2024) to perform reinforcement learning from proof assistant feedback (RLPAF) on the supervised fine-tuned model. Similar to DeepSeek-Prover-v1.5-RL, the verification results from the Lean compiler serve as reward supervision. After analyzing the validation results on benchmarks, we find our posttraining strategies can effectively boost the overall performance on MiniF2F (Zheng et al., 2021) benchmark.", + "bbox": [ + 115, + 589, + 884, + 766 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "In summary, here are our main contributions:", + "bbox": [ + 144, + 775, + 512, + 790 + ], + "page_idx": 1 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- We continue train current APT models with more high quality statement-proof data pairs. 
More importantly, we design synthetic data to enhance the models' self-reflection capabilities, enabling us to pilot cognitive behaviors in our models before applying the RL algorithm.", + "- We investigate the RL training to boost the ATP prover that generates whole mathematical proofs in Lean 4 codes. During training, we employ the Lean 4 verifier to serve as a reward" + ], + "bbox": [ + 139, + 804, + 878, + 901 + ], + "page_idx": 1 + }, + { + "type": "page_number", + "text": "2", + "bbox": [ + 492, + 923, + 504, + 934 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "supervision.", + "bbox": [ + 159, + 102, + 263, + 116 + ], + "page_idx": 2 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- The proposed Leanabelle-Prover achieves state-of-the-art performance through our meticulously designed strategy, $59.8\\%$ (pass@32) on MiniF2F-test.", + "- Currently, we collect around 1.52M formal statements, and 0.22M formal statements with detailed informal CoTs and verified proofs. All intermediate models and training data are released to the community1." + ], + "bbox": [ + 139, + 117, + 884, + 198 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "2. Related Work", + "text_level": 1, + "bbox": [ + 112, + 222, + 282, + 239 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "**Lean4 Theorem Proving using LLMs.** With the rapid progress of LLMs, research has explored applying LLMs in FL reasoning to automate theorem proving. 
Prior research can be briefly classified into two strategies, namely proof-step generation and whole-proof generation.", + "bbox": [ + 112, + 254, + 882, + 305 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Proof-step generation methods train an LLM agent to iteratively generate proof steps by predicting the next tactic based on the current proof state (Polu and Sutskever, 2020; Polu et al., 2022; Lample et al., 2022; Azerbayev et al., 2023; Yang et al., 2023; Lin et al., 2024; DeepMind, 2024; Trinh et al., 2024; Wu et al., 2024; Xin et al., 2024; Li et al., 2024; Xin et al., 2025). These methods apply FL executor to verify after each step of generation and is able to discover some non-trivial proofs. For example, LeanDojo (Yang et al., 2023) first establishes relationship models between various tactic states within proofs. It then retrieves relevant premises from the mathematical library based on the current output state (as collected from a Lean verifier) and inputs these premises into an encoder-decoder model to generate the subsequent tactic. Employing Monte-Carlo tree search (MCTS) (Coulom, 2006) is another common solution in this field. However, as the complexity of the proof increases, tree search methods become computationally expensive and lack high-level NL planning to control the overall structure of the proof (Wang et al., 2025).", + "bbox": [ + 110, + 310, + 885, + 505 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Whole-proof generation methods treat theorem proving as a kind of code generation problem, where LLMs generate the entire proof in a single attempt by using supervised training or prompt engineering (Xin et al., 2024; Lin et al., 2025; Dong and Ma, 2025; Wang et al., 2025). This approach leverages the NL reasoning and high-level planning capabilities of LLMs with predictable computation costs, but lacks intermediate feedback from FL executors. 
Thus, the core challenge for improving whole-proof generation is that there are no sufficient Lean 4 codes to eliminate the gaps between NL and FL modalities. However, generating such data requires high levels of expertise, making it difficult to scale. As a result, the generated proofs often lack post-hoc analysis of errors and tend to perform badly on tedious questions that require non-trivial solutions.", + "bbox": [ + 110, + 511, + 882, + 674 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Reinforcement Learning for Lean4 Theorem Proving. There are two typical solutions to utilize RL for Lean4 Theorem Proving. In DeepSeek-Prover-v1.5-RL (Xin et al., 2024), the authors employ GRPO algorithm and takes the feedback signals from Lean 4 verifier as reward that reveals the proofs verified as correct or wrong. Such methods only uses the compilation feedback from the entire proof process as the reward result. In this paper, we employ the whole-proof generation approach, so we continue with this same solution. In contrast, Xin et al. (2025) use DPO (Rafailov et al., 2023) to refine the policy LLM by leveraging preference pairs naturally generated during tree search such as MCTS (Coulom, 2006). Therefore, the second solution utilizes the tactic state of each step during the compilation process. However, the effectiveness of existing methods still need improvement.", + "bbox": [ + 110, + 697, + 882, + 860 + ], + "page_idx": 2 + }, + { + "type": "page_footnote", + "text": "1https://github.com/Leanabell-LM/Leanabell-Prover", + "bbox": [ + 139, + 887, + 571, + 901 + ], + "page_idx": 2 + }, + { + "type": "page_number", + "text": "3", + "bbox": [ + 492, + 922, + 505, + 935 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Cognitive Behaviors Gandhi et al. 
(2025) first reveal that models without integrating human-like reasoning abilities (e.g., verification, backtracking, subgoal setting and backward chaining) are hard to obtain significant improvements by directly applying RL algorithms. Similarly, Wan et al. (2025) propose that LLMs lack a specialized design for acquiring meta-thinking, resulting in low efficacy. Zeng et al. (2025) and Liu et al. (2025) propose that the training may directly start from the base models with such cognitive behaviors—a paradigm referred to as zero RL training. Considering the barely satisfactory performance of RL strategies in the formal reasoning field, we have reasons to suspect whether this is due to this fundamental reason.", + "bbox": [ + 110, + 101, + 887, + 231 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "3. Model Training", + "text_level": 1, + "bbox": [ + 112, + 254, + 302, + 273 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "3.1. Continual Training", + "text_level": 1, + "bbox": [ + 112, + 288, + 317, + 305 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Base Model. We begin with the previous whole-proof generation models DeepSeek-Prover v1.5-SFT (Xin et al., 2024) and Goedel-Prover (Lin et al., 2025) that are two well-trained versions after the supervised fine-tuning stage. Both of these two models are with 7 billion parameters. Specifically, the two models are trained with proofs added detailed explanatory informal comments. Therefore, the model possesses the basic ability to align natural language descriptions with Lean 4 codes.", + "bbox": [ + 110, + 313, + 885, + 411 + ], + "page_idx": 3 + }, + { + "type": "image", + "img_path": "images/7359a344dc4b38409c46e9cce1237d8e32c0529d3ecee4ce0d6110ab088be7ee.jpg", + "image_caption": [ + "Figure 2 | Distributions of math domains in various Lean 4 dataset. Lean Workbook, Goedel-Prover, STP Lean and NuminaMath are training set. MiniF2F and ProofNet are test set." 
+ ], + "image_footnote": [], + "bbox": [ + 119, + 423, + 875, + 741 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Statement Formalization. Similar to Lin et al. (2025), we train a formalizer, based on Qwen2.5-32B-Coder-Instruct (Hui et al., 2024), to convert the olympiad-level math problems in natural language into formal statements. We collect the formal and informal statement pairs sourced", + "bbox": [ + 112, + 824, + 885, + 873 + ], + "page_idx": 3 + }, + { + "type": "page_number", + "text": "4", + "bbox": [ + 492, + 923, + 505, + 935 + ], + "page_idx": 3 + }, + { + "type": "table", + "img_path": "images/b7cc8430f6be44158dab40da3bc8875fd37fa636e1efd34a5b31f9efe81e607d.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
Data SourceFSFS+IC+PF
Lean Workbook (Ying et al., 2024)140K40K
STP-Lean (Dong and Ma, 2025)400K36K
NuminaMath (Li et al., 2024)520K97K
AoPS (AoPS)370K26K
PromptCoT (Zhao et al., 2025)90K20K
Total1.52M0.22M
", + "bbox": [ + 262, + 98, + 736, + 235 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Table 1 | Distribution of our training sources, including released data by existing models and synthetic data from informal math problems. FS, PF and IC refer to formal statements, proofs and informal comments, respectively.", + "bbox": [ + 112, + 244, + 882, + 294 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "from Goedel-Prover $^{2}$ (around 30K). The training details of our formalizer are presented in Appendix A. With the trained formalizer, we convert the natural language statement and then verify with Lean 4 compiler. After these procedures, we finally get 520K formal statements from NuminaMath (Li et al., 2024) and 370K formal statements from AoPS (AoPS). Recently, Zhao et al. (2025) proposed to utilize LLMs to synthesize math problems through mathematical concepts. Based on the data analysis in Figure 2, we employ the PromptCoT framework to synthesize math problems at different levels, including AMC, AIME and USAMO, on three majority math domains (i.e., algebra, number theory and calculus).", + "bbox": [ + 110, + 317, + 884, + 450 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Data Curation. We develop a comprehensive Lean 4 code completion dataset for the supervised fine-tuning, as shown in Table 1. These theorems are sourced from various projects, such as the standard Lean 4 math library Mathlib4 (mathlib4, 2025), Lean Workbook (Ying et al., 2024), synthetic theorems from Goedel-Prover (Lin et al., 2025) and STP (Dong and Ma, 2025). Besides the above-mentioned public data, we use deepseek-prover and goedel-prover to sample proofs that can be correctly verified by Lean 4 compiler. In this manner, we collect Lean data from NuminaMath and AoPS. Similarly, we formalize around 90K math problems synthesized through PromptCoT. 
In DeepSeek-Prover-v1.5 (Xin et al., 2024), the authors claim that incorporating natural language reasoning before generating theorem proof code can eliminate the gap between problem solving strategies in natural language and theorem proving in Lean. Thus, we also collect comments for part of the data. Finally, we collected around 1.52M formal statements and 0.22M statements with detailed informal comments and verified proofs.", + "bbox": [ + 110, + 473, + 882, + 668 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "As shown in Figure 2, we find that the data distributions of Lean Workbook, Goedel-Prover, STP Lean and NuminaMath cover well the MiniF2F test set. On the contrast, there is an obvious domain bias for ProofNet, which is also revealed by (Lin et al., 2025). It indicates a promising direction for further improvement by expanding data in specific mathematical domains. For fairness, we do not adjust the distributions of the training set in our optimizations.", + "bbox": [ + 110, + 674, + 882, + 757 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Implementation After obtaining a large collection of formalized statements with proofs, we continuously train once. We use the lightweight framework SWIFT3 for the supervised fine-tuning (SFT). 
The SFT experiment is trained on 8 NVIDIA H100 GPUs with the following hyperparameters: a learning rate of $5 \\times 10^{-5}$ , a global batch size of 32 over 2 epochs, and a weight decay coefficient of 0.1.", + "bbox": [ + 110, + 781, + 882, + 863 + ], + "page_idx": 4 + }, + { + "type": "page_footnote", + "text": "$^{2}$ https://huggingface.co/datasets/Goedel-LM/Lean-workbook-proofs", + "bbox": [ + 137, + 873, + 695, + 888 + ], + "page_idx": 4 + }, + { + "type": "page_footnote", + "text": "3https://github.com/modelscope/ms-swift", + "bbox": [ + 139, + 888, + 482, + 901 + ], + "page_idx": 4 + }, + { + "type": "page_number", + "text": "5", + "bbox": [ + 492, + 922, + 505, + 935 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "3.2. Integrating Cognitive Behaviors", + "text_level": 1, + "bbox": [ + 112, + 101, + 426, + 118 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Gandhi et al. (2025) have revealed the importance of cognitive behaviors in base models before utilizing RL strategies. Following a similar idea, we induce reflection-like behaviors through specially designed interventions. We propose two kinds of CoT templates to integrate the self-reflection capabilities: Lean completion and rewriting.", + "bbox": [ + 112, + 127, + 884, + 193 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Lean Completion. We sample 5K formal statements used in our previous continue training stage, which are not $100\\%$ correct in the pass@16 sampling. We find the position where the first error tactic appears, and re-sample for completion. Once collecting new valid proofs, we can use the following prompt to ask Claude (Anthropic, 2025) to generate the reflection response. We observe that an effective reflection process can only be achieved by combining incorrect proofs, correct proofs, and tactic error messages. Thus, the feedback collected from Lean verifier is also used to create the prompt. 
An example is presented in Section B.1.", + "bbox": [ + 110, + 218, + 884, + 332 + ], + "page_idx": 5 + }, + { + "type": "code", + "sub_type": "code", + "code_caption": [], + "code_body": "Lean Completion Prompt (Claude) \n# Initial Proof \n' $\\text{巧} ^ { \\prime }$ lean4 \n{old_code} \n# Lean Feedback \n{error} \n# Correct Proof \n' $\\text{巧} ^ { \\prime }$ lean4 \n{new_code} \nYour task is to generate a reflection of a Lean4 proof as follows: 1. You are provided with a lean proof code that failed to complete the proof, the verify feedback, and a revised correct proof. 2. You need to act as a verifier to check the code step by step and point out where the code fails with incorrect tactics. 3. Provide an alternative method, such as those in the correct proof. 4. Act as you are verifying your own proof.. Here are some rules you need to follow: 1. At the beginning, you should start with a conjunction phrase such as 'let's verify' and claim you need to verify the proof. 2. Instead of directly pointing out the issue, your answer should show the process to identify the incorrect step. 3. Do not refer to Lean Feedback, Correct Proof, or anything that shows you have already known the issue before your reflection. 4. Do not provide any new Lean4 code block, you don't need to write a correct proof. 5. Do not include a summary section. 6. Again, do not refer to Lean Feedback, Correct Proof, do not write anything like 'as shown in the correct proof'. Now, start with a conjunction phrase and require you need to check the proof, do not directly claim there is an issue.", + "guess_lang": "txt", + "bbox": [ + 115, + 350, + 878, + 772 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Rewriting. Based on the above-mentioned Lean completion, there are two main steps in the rewriting strategy. First, we suspect that the generation of incorrect proofs is, to some extent, due to the incorrect problem-solving comments being generated. 
Therefore, we introduce Qwen2.5-72B-instruct (Team, 2024) to evaluate the problem-solving comments and then regenerate the correct problem-solving comments. Second, we provide Claude with both the invalid and newly rewritten valid Lean 4 code to generate comprehensive Chains of Thought (CoTs) that explain", + "bbox": [ + 112, + 797, + 884, + 897 + ], + "page_idx": 5 + }, + { + "type": "page_number", + "text": "6", + "bbox": [ + 492, + 923, + 505, + 935 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "the reasoning process. In this manner, we collect 19K samples with CoTs (See the detailed examples in Appendix B.2). Here are the prompt templates for these two steps:", + "bbox": [ + 112, + 101, + 880, + 133 + ], + "page_idx": 6 + }, + { + "type": "code", + "sub_type": "code", + "code_caption": [ + "Rewriting - Step 1 (Qwen2.5-72B-Instruct)" + ], + "code_body": "You are an experienced mathematics evaluation teacher. You will be provided with a math problem and the corresponding solution idea.. \nPlease determine whether the solution idea is correct. If it is, please output \"Correct\", otherwise please output \"Incorrect\". If the solution idea is incorrect, please provide the correct solution idea, and the output of the solution idea should be included within \\*\\* and \\*\\*.", + "guess_lang": "txt", + "bbox": [ + 129, + 168, + 855, + 237 + ], + "page_idx": 6 + }, + { + "type": "code", + "sub_type": "code", + "code_caption": [], + "code_body": "The output format is as follows:", + "guess_lang": "txt", + "bbox": [ + 134, + 248, + 389, + 261 + ], + "page_idx": 6 + }, + { + "type": "code", + "sub_type": "code", + "code_caption": [], + "code_body": "1. Judgement: Incorrect. Solution: “‘‘Solution idea’’”", + "guess_lang": "txt", + "bbox": [ + 134, + 269, + 591, + 282 + ], + "page_idx": 6 + }, + { + "type": "code", + "sub_type": "code", + "code_caption": [], + "code_body": "2. 
Judgement: Correct.", + "guess_lang": "txt", + "bbox": [ + 136, + 282, + 326, + 294 + ], + "page_idx": 6 + }, + { + "type": "code", + "sub_type": "code", + "code_caption": [], + "code_body": "[math problem start] \n{problem} \n[math problem end]", + "guess_lang": "txt", + "bbox": [ + 134, + 304, + 295, + 338 + ], + "page_idx": 6 + }, + { + "type": "code", + "sub_type": "code", + "code_caption": [], + "code_body": "[solution idea start] \n{solution} \n[solution idea end]", + "guess_lang": "txt", + "bbox": [ + 134, + 349, + 302, + 384 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "With these synthesized data, we employ our second-stage continual training, with a learning rate of $5 \\times 10^{-5}$ and overall batch size of 1024 for one epoch. Finally, we obtain the model, named as Leanabell-Prover-SFT.", + "bbox": [ + 112, + 419, + 884, + 467 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "3.3. Reinforcement Learning", + "text_level": 1, + "bbox": [ + 112, + 489, + 359, + 506 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "We integrate reinforcement learning (RL) with the Lean 4 theorem prover to automate the discovery of valid proofs. The RL agent interacts with the Lean 4 environment, generating whole proofs and receiving feedback from Lean 4 compiler as reward signals. The agent's objective is to maximize cumulative rewards by learning to generate syntactically correct, logically valid proofs for an input formal statement.", + "bbox": [ + 112, + 516, + 882, + 596 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Policy Optimization Algorithms. We employ the recent GRPO (Shao et al., 2024) as our RL algorithm. For each input formal statement $q$ , GRPO samples a group of outputs $\\{o_1, o_2, \\dots, o_G\\}$ from the old policy $\\pi_{\\theta_{old}}$ and then collect the feedback $\\{\\tau_1, \\tau_2, \\dots, \\tau_G\\}$ for the group of responses through Lean 4 compiler. 
According to each feedback status $\\tau_i$ , we assign a particular reward. Then, the advantage of the $i$ -th output is calculated by normalizing the group-level rewards $\\{R_1, R_2, \\dots, R_G\\}$ :", + "bbox": [ + 112, + 621, + 884, + 719 + ], + "page_idx": 6 + }, + { + "type": "equation", + "text": "\n$$\n\\hat {A} _ {i, t} = \\frac {r _ {i} - \\operatorname {m e a n} \\left(\\left\\{R _ {i} \\right\\} _ {i = 1} ^ {G}\\right)}{\\operatorname {s t d} \\left(\\left\\{R _ {i} \\right\\} _ {i = 1} ^ {G}\\right)}. \\tag {1}\n$$\n", + "text_format": "latex", + "bbox": [ + 396, + 717, + 882, + 755 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Finally, we optimizes the policy model $\\pi_{\\theta}$ by maximizing the following objective:", + "bbox": [ + 112, + 759, + 774, + 776 + ], + "page_idx": 6 + }, + { + "type": "equation", + "text": "\n$$\n\\begin{array}{l} \\mathcal {J} _ {\\mathrm {G R P O}} (\\theta) = \\mathbb {E} _ {(q, a) \\sim \\mathcal {D}, \\{o _ {i} \\} _ {i = 1} ^ {G} \\sim \\pi_ {\\theta_ {\\mathrm {o l d}}} (\\cdot | q)} \\\\ \\left. 
\\right.\\left[ \\frac {1}{G} \\sum_ {i = 1} ^ {G} \\frac {1}{| o _ {i} |} \\sum_ {t = 1} ^ {| o _ {i} |} \\left(\\min \\left(r _ {i, t} (\\theta) \\hat {A} _ {i, t}, \\operatorname {c l i p} \\left(r _ {i, t} (\\theta), 1 - \\varepsilon , 1 + \\varepsilon\\right) \\hat {A} _ {i, t}\\right)\\right)\\right], \\tag {2} \\\\ \\end{array}\n$$\n", + "text_format": "latex", + "bbox": [ + 216, + 785, + 882, + 853 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "where", + "bbox": [ + 112, + 860, + 169, + 873 + ], + "page_idx": 6 + }, + { + "type": "equation", + "text": "\n$$\nr _ {i, t} (\\theta) = \\frac {\\pi_ {\\theta} \\left(o _ {i , t} \\mid q , o _ {i , < t}\\right)}{\\pi_ {\\theta_ {\\mathrm {o l d}}} \\left(o _ {i , t} \\mid q , o _ {i , < t}\\right)}, \\tag {3}\n$$\n", + "text_format": "latex", + "bbox": [ + 389, + 870, + 882, + 906 + ], + "page_idx": 6 + }, + { + "type": "page_number", + "text": "7", + "bbox": [ + 492, + 923, + 504, + 934 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "$\\varepsilon$ is a hyperparameter. In our experiments, we set $\\varepsilon = 0.2$ . 
Notably, we do not use the Kullback-Leibler (KL) divergence penalty.", + "bbox": [ + 112, + 101, + 887, + 134 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Rewriting - Step 2 (Claude)", + "text_level": 1, + "bbox": [ + 378, + 154, + 618, + 168 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Wrong code", + "bbox": [ + 134, + 173, + 233, + 183 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "\" ' lean4", + "bbox": [ + 134, + 184, + 201, + 193 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "lean code1", + "bbox": [ + 136, + 196, + 216, + 206 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": ",", + "bbox": [ + 136, + 206, + 161, + 215 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Correct code", + "bbox": [ + 134, + 229, + 250, + 239 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "\" ' lean4", + "bbox": [ + 136, + 240, + 201, + 250 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "lean code2", + "bbox": [ + 136, + 252, + 218, + 262 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": ",", + "bbox": [ + 136, + 263, + 161, + 269 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "I have given you with two Lean4 code solutions to the same problem. The first solution fails to compile in Lean4, while the second solution compiles successfully.", + "bbox": [ + 134, + 285, + 821, + 309 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Your task is to:", + "bbox": [ + 136, + 309, + 263, + 319 + ], + "page_idx": 7 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "1. Act as a verification assistant and carefully compare these two code snippets.", + "2. Identify the specific errors or flawed strategies in the first solution that caused compilation failure.", + "3. Explain the reasoning process that would lead someone from the incorrect approach to the correct solution." 
+ ], + "bbox": [ + 136, + 319, + 828, + 375 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "When analyzing the code, please simulate the thought process of someone examining their own proof. Begin sections of your analysis with phrases like \"Let's verify my proof...\", \"Wait, I see an issue here...\", or \"Let me reconsider this approach...\" This should demonstrate how someone might catch and correct their own mistakes.", + "bbox": [ + 134, + 386, + 858, + 432 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "The analysis emphasizes conceptual understanding over syntax details, explaining the fundamental logical or strategic errors in the initial solution and demonstrating how the corrected solution properly addresses these conceptual problems.", + "bbox": [ + 134, + 442, + 836, + 478 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Please structure your response with:", + "bbox": [ + 134, + 488, + 420, + 499 + ], + "page_idx": 7 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- Identification of specific errors in the first solution.", + "- Explanation of the conceptual issues that led to these errors.", + "- How to fix the conceptual problems in error so as to generate the problem-solving idea of the second solution?" + ], + "bbox": [ + 134, + 500, + 847, + 543 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Do not provide any new Lean4 code beyond what I've given you - focus exclusively on analyzing the provided code. Don't include the phased titles in the output results, such as \"Identification of Specific Errors in the First Solution\", \"Conceptual Issues That Led to These Errors\", etc. Also, don't use expressions like \"the first solution\" or \"the second solution\". Use \"current solution\" to represent \"first solution\". Although you used the second solution for auxiliary analysis, avoid revealing in your response that you've seen its content. 
For example, refrain from saying things like 'I noticed that in the new solution.' Instead, respond as if you're thinking independently, based solely on the first solution.", + "bbox": [ + 134, + 554, + 853, + 657 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Reward Function. Unlike stepwise rewards, the feedback is provided only after the full proof is compiled by Lean 4 verifier in our experiments. Our rewards are derived from: (1) Terminal reward $R_{\\text{success}}$ : a scalar reward granted if the entire proof is validated successfully by Lean 4 verifier. (2) Penalty $R_{\\text{fail}}$ : a negative reward for proofs with critical errors (e.g., type mismatches, infinite loops, unsolved goals and etc). Moreover, we observe that there are warnings in the feedback, such as some unnecessary or redundant steps have no negative effects on the final validation. In our experiments, we ignore warning cases as long as the compilation and verification process passes successfully. So, given the feedback $\\tau$ from Lean 4 compiler, our final reward function can be formulated as:", + "bbox": [ + 112, + 688, + 884, + 832 + ], + "page_idx": 7 + }, + { + "type": "equation", + "text": "\n$$\nR (\\tau) = \\left\\{ \\begin{array}{l l} R _ {\\text {s u c c e s s}} & \\text {i f L e a n 4 f u l l y v a l i d a t e s} \\tau \\\\ R _ {\\text {f a i l}} & \\text {o t h e r w i s e (s y n t a x e r r o r s / t i m e o u t)} \\end{array} \\right. \\tag {4}\n$$\n", + "text_format": "latex", + "bbox": [ + 282, + 841, + 882, + 884 + ], + "page_idx": 7 + }, + { + "type": "page_number", + "text": "8", + "bbox": [ + 492, + 923, + 505, + 935 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Implementation We conduct RL training based on the Leanabell-Prover-SFT. We use a constant learning rate of $1 \\times 10^{-6}$ . For each theorem, we sample a group of 32 candidate proofs, with maximal rollout length set to 8192. 
The training global batch size is set to $32 \\times 32 = 1024$ . On the RL training data, we select samples from those whose number of validations in Pass@32 falls within the range of [2, 16]. We believe this subset of data has a certain level of difficulty while providing exploration space, making it effective for updating the policy model. The detailed distribution of pass@32 is presented in Figure 6 in Appendix C. Finally, we obtain the RL version model named as Leanabell-Prover-RL.", + "bbox": [ + 115, + 99, + 885, + 229 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "4. Experiments", + "text_level": 1, + "bbox": [ + 115, + 254, + 272, + 273 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "Benchmarks We follow the previous work (Lin et al., 2025; Xin et al., 2024; Wang et al., 2024) and mainly validate the effectiveness of our proposed method on the most commonly-used MiniF2F-test (Zheng et al., 2021).", + "bbox": [ + 115, + 285, + 885, + 334 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "Results on MiniF2F. We have two versions of our models posttrained from two strong prover models: Deepseek-Prover-v1.5-SFT and Goedel-Prover-SFT, namely Leanabell-Prover-DS and Leanabell-Prover-GD. We mainly compare current whole proof generation methods, while ignore those with proof-step methods using far more inference-compute. As shown in Table 2, our posttraining framework boosts both DeepSeek-Prover-v1.5-SFT and Goedel-Prover-SFT models. On the minimum sample budget, Leanabell-Prover-GD-RL achieves the SOTA of MiniF2F (59.8% on pass@32), which gains up to 2.2% (i.e. from Goedel-Prover SFT, from 57.6% to 59.8%). We can observe that following our continual training phase, our method (Leanabell-Prover-DS-SFT) shows improvement over its base model (DeepSeek-Prover-v1.5-SFT), and the RL version continues to effectively enhance its performance. Meanwhile, Leanabell-Prover-GD-SFT performs almost identically to Leanabell-Prover-DS-SFT. 
This is reasonable, as Goedel-Prover-SFT is finetuned from DeepSeek-Pover-v1.5-base, with a significantly larger amount of data compared to our continual training stage. Therefore, our continual training on Leanabell-Prover-GD-SFT primarily adjusts the model's reasoning ability across different math domain distributions and incorporates the proper CoT format with cognitive behaviors into the current training data. This makes the checkpoint more conducive to RL training.", + "bbox": [ + 115, + 360, + 885, + 619 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "We also increase the sampling budget to compare the performance gains. For the SFT models, as can be seen, the baseline DeepSeek-Prover-v1.5-SFT achieves around $2.2\\%$ performance gains (i.e., $48.2\\%$ to $50.4\\%$ ), as increasing sampling budget from 32 to 128. Within our configurations, our Leanabell-Prover-DS-SFT and Leanabell-Prover-GD-SFT models also achieve $1.8\\%$ (i.e., $54.9\\%$ to $56.7\\%$ ) and $1.2\\%$ (i.e., $58.2\\%$ to $59.4\\%$ ) performance gains on the same inference scaling experiments, respectively. For the RL models, DeepSeek-Prover-v1.5-RL achieves $1.6\\%$ performance gains (i.e., $50.0\\%$ to $51.6\\%$ ), while our Leanabell-Prover-DS-RL achieves more gains (i.e., $56.6\\%$ to $59.0\\%$ ). Therefore, after the model has undergone SFT and RL training, our models still maintain the exploration capabilities.", + "bbox": [ + 115, + 625, + 885, + 771 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "Exploration Abilities and RL. We first examine our two SFT models, with their pass@16 accuracy at different sampling temperatures. This metric can serve as an indicator of the policy's exploration ability and is particularly relevant for RL, as it reflects the policy's ability to generate responses that can achieve a positive reward. As shown in Figure 3, we find both SFT models are exploratory, and thus ready for RL. The RL training rewards are shown in Figure 4. 
During our experiments, we also compared the original GRPO with Dr. GRPO (Liu et al., 2025), and", + "bbox": [ + 115, + 796, + 885, + 894 + ], + "page_idx": 8 + }, + { + "type": "page_number", + "text": "9", + "bbox": [ + 492, + 923, + 504, + 934 + ], + "page_idx": 8 + }, + { + "type": "table", + "img_path": "images/2fc90dfe1c7eb7a35b8a8ef67fc0f69ff7ff4653848471c031d9155572184687.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
MethodSample budgetminiF2F-test
TheoremLlama [28]12833.6%
DeepSeek-Prover-v1 [32]12846.1% ± 0.5%
DeepSeek-Prover-v1.5-Base [33]12829.7% ± 0.5%
320039.2%
640042.2%
DeepSeek-Prover-v1.5-SFT [33]3248.2% ± 0.6%
6449.6% ± 0.7%
12850.4% ± 0.4%
320053.3% ± 0.5%
DeepSeek-Prover-v1.5-RL [33]3250.0% ± 0.5%
6450.7% ± 0.4%
12851.6% ± 0.5%
320054.9% ± 0.7%
STP [7]12857.7% ± 0.6%
320061.7% ± 0.6%
Goedel-Prover-SFT [15]3257.6% ± 0.7%
320062.7%
Leanabell-Prover-DS-SFT3254.9%
6455.3%
12856.7%
Leanabell-Prover-DS-RL3256.6%
6457.4%
12859.0%
Leanabell-Prover-GD-SFT3258.2%
6459.0%
12859.4%
Leanabell-Prover-GD-RL3259.8%
6460.7%
12861.1%
", + "bbox": [ + 201, + 200, + 796, + 708 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "Table 2 | Comparison with state-of-the-art methods on the miniF2F-test dataset. The notation $\\mu \\pm \\sigma$ denotes the average accuracy $\\mu$ and the standard deviation $\\sigma$ . \"DS\" and \"GD\" refer to using the DeepSeek-Prover-v1.5-SFT and Goedel-Prover-SFT as base models to continue SFT and RL training, respectively.", + "bbox": [ + 112, + 717, + 884, + 784 + ], + "page_idx": 9 + }, + { + "type": "page_number", + "text": "10", + "bbox": [ + 487, + 923, + 510, + 935 + ], + "page_idx": 9 + }, + { + "type": "image", + "img_path": "images/01687d0709017cc46d9129128904e3099ce96b65ce6ffb05bc736ea378baf0ac.jpg", + "image_caption": [ + "Figure 3 | Exploration ability: pass@16 measures how well base models explore." + ], + "image_footnote": [], + "bbox": [ + 309, + 102, + 687, + 337 + ], + "page_idx": 10 + }, + { + "type": "image", + "img_path": "images/7f4f27568a48a2be4320d9e2cbf50395a8c162d8e7903c6e2cea1fd88a15c8ad.jpg", + "image_caption": [ + "Figure 4 | Left: Reward curve during training Leanabell-Prover-Prover-DS-RL. Right: Reward curve during training Leanabell-Prover-Prover-GD-RL." + ], + "image_footnote": [], + "bbox": [ + 119, + 381, + 480, + 589 + ], + "page_idx": 10 + }, + { + "type": "image", + "img_path": "images/33a464e26cdd80f93c82ec770c3dc8cb34fc16b34875ff02895ac22cb7f8d7e9.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 517, + 381, + 875, + 589 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "found that the training dynamics remained largely consistent under these two RL training algorithms. This may be attributed to the fact that the length of different rollout responses, regardless of whether they are correct or incorrect, does not vary significantly in formal language reasoning. We have also observed that selecting an appropriate prompt set is crucial for RL training. 
Merely using pass@N as the sole criterion is insufficient to unlock the full potential of RL. As shown in Figure 5, we analyze the distributions of error problems across different source types in the MiniF2F-test set. We observed that, based on DeepSeek-Prover-v1.5-SFT, errors can be reduced across all data source types in MiniF2F-test set, especially for AMC, MATH, and CUSTOM. However, this improvement is significantly reduced in optimization results based on Goedel-Prover-SFT. This suggests that such as the intrinsic difficulty level of the statements (e.g., whether they are at the AIME or IMO level), the coverage of mathematical domains, and the balance with the prover model's capabilities, are also important.", + "bbox": [ + 112, + 659, + 884, + 854 + ], + "page_idx": 10 + }, + { + "type": "page_number", + "text": "11", + "bbox": [ + 487, + 922, + 507, + 935 + ], + "page_idx": 10 + }, + { + "type": "image", + "img_path": "images/35e63804bae7aac8349c0715b1dcd16bb5dda356dba66e77102f715225f4ac8d.jpg", + "image_caption": [ + "Figure 5 | Distribution of problem types that failed verification on the MiniF2F-test set." + ], + "image_footnote": [], + "bbox": [ + 117, + 99, + 484, + 258 + ], + "page_idx": 11 + }, + { + "type": "image", + "img_path": "images/c7758c7a762f4643a6567b155f612a84f49906540bd173b97b882308eb809395.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 514, + 99, + 880, + 259 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "5. Conclusion, Limitation, and Future Work", + "text_level": 1, + "bbox": [ + 112, + 309, + 556, + 326 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "We present a series of ATP models, named Leanabell-Proverseries, by investigating the posttraining scaling of current provers. Leanabell-Prover is started with DeepSeek-Prover-v1.5-SFT and Goedel-Prover-SFT which are two well-trained whole-proof generation models. 
We first collect a large collection of formalized statements with proofs to continue training. More importantly, we embed cognitive behaviors into the base models by applying a second-stage continual training on such synthetic data. With such prepared SFT models, we finally achieve the final performance through the RL optimization.", + "bbox": [ + 112, + 340, + 884, + 454 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "Limitations. As we find that the base prover model (i.e., DeepSeek-Prover-v1.5) is a weak LLM compared to those used for posttraining scaling in natural languages, such as Deepseek v3 and the Qwen2.5 family. Although we have piloted to integrate cognitive behaviors into the model and selected the prompt set of RL according to the ability of our SFT models, the expected stronger RL performance has not fully materialized. Our findings right now are more in line with the replication on weak LLMs with RL (Liu et al., 2025; Zeng et al., 2025). Moreover, although we achieved performance gains, we observe that the integrated self-reflection capacities demonstrate a weakening trend after RL training. This suggests that effectively integrating cognitive behaviors into such weak base LLMs remains highly challenging.", + "bbox": [ + 112, + 479, + 884, + 625 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "Future Work. Our goal is to fully invoke formal reasoning abilities, and thus we will continue to explore the following directions (hopefully can achieve some improvement):", + "bbox": [ + 112, + 650, + 882, + 683 + ], + "page_idx": 11 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- Bridging formal reasoning with natural languages. Reasoning in formal languages has natural benefits for theorem proving, including no mathematical knowledge hallucination, and all steps and states with verified feedback. 
However, reasoning abilities of current formal provers (including our current work) still lag behind those of natural language reasoning models. We have made the first step to design a CoT template and synthesize data accordingly, which is intended to insert natural language information to help formal reasoning. We also tried Deepseek R1 with prompting for formal proof generation, which achieves $51.6\\%$ (pass@32) on MiniF2F-test. Therefore, we hope to develop more effective manners that can transfer the math knowledge and reasoning abilities in natural languages into formal proof generation.", + "- Bridging whole proof generation with proof-step methods. We believe current RL framework can help bridging these two lines of methods. For example, we can replace the" + ], + "bbox": [ + 139, + 701, + 884, + 895 + ], + "page_idx": 11 + }, + { + "type": "page_number", + "text": "12", + "bbox": [ + 487, + 923, + 510, + 935 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "sampling-based response-level rollout in RL into proof-step rollout for better exploration (but still rollout into a whole proof or reach the maximum length, then calculate the response-level reward), thus improving the learning efficiency of the RL training stage.", + "bbox": [ + 157, + 99, + 884, + 151 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "References", + "text_level": 1, + "bbox": [ + 115, + 174, + 228, + 191 + ], + "page_idx": 12 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[1] Anthropic. Claude 3.7 Sonnet System card. 2025. URL https://www.anthropic.com/news/claudi-3-7-sonnet.", + "[2] AoPS. Art of problem solving. https://artofproblemsolving.com/. Accessed: [date].", + "[3] Z. Azerbayev, H. Schoelkopf, K. Paster, M. D. Santos, S. McAleer, A. Q. Jiang, J. Deng, S. Biderman, and S. Welleck. LLemma: An open language model for mathematics. arXiv preprint arXiv:2310.10631, 2023.", + "[4] R. Coulom. 
Efficient selectivity and backup operators in monte-carlo tree search. In International conference on computers and games, pages 72-83. Springer, 2006.", + "[5] L. De Moura, S. Kong, J. Avigad, F. Van Doorn, and J. von Raumer. The Lean theorem prover (system description). In International Conference on Automated Deduction (CAD), 2015.", + "[6] DeepMind. Alphaproof and Alphageometry, July 2024. URL https://deepmind.google.de/discover/blog/ai-solves-imo-problems-at-silver-medal-level/.", + "[7] K. Dong and T. Ma. Stp: Self-play llm theorem provers with iterative conjecturing and proving. arXiv preprint arXiv:2502.00212, 2025.", + "[8] K. Gandhi, A. Chakravarthy, A. Singh, N. Lile, and N. D. Goodman. Cognitive behaviors that enable self-improving reasoners, or, four habits of highly effective stars. arXiv preprint arXiv:2503.01307, 2025.", + "[9] D. Guo, D. Yang, H. Zhang, J. Song, R. Zhang, R. Xu, Q. Zhu, S. Ma, P. Wang, X. Bi, et al. Deepseek-R1: Incentivizing reasoning capability in llms via reinforcement learning. arXiv preprint arXiv:2501.12948, 2025.", + "[10] B. Hui, J. Yang, Z. Cui, J. Yang, D. Liu, L. Zhang, T. Liu, J. Zhang, B. Yu, K. Dang, et al. Qwen2. 5-coder technical report. arXiv preprint arXiv:2409.12186, 2024.", + "[11] G. Lample, T. Lacroix, M.-A. Lachaux, A. Rodriguez, A. Hayat, T. Lavril, G. Ebner, and X. Martinet. Hypertree proof search for neural theorem proving. Advances in Neural Information Processing Systems (NeurIPS), 35, 2022.", + "[12] J. Li, E. Beeching, L. Tunstall, B. Lipkin, R. Soletskyi, S. C. Huang, K. Rasul, L. Yu, A. Jiang, Z. Shen, Z. Qin, B. Dong, L. Zhou, Y. Fleureau, G. Lample, and S. Polu. Numinamath, 2024.", + "[13] Y. Li, D. Du, L. Song, C. Li, W. Wang, T. Yang, and H. Mi. Hunyuanprover: A scalable data synthesis framework and guided tree search for automated theorem proving. arXiv preprint arXiv:2412.20735, 2024.", + "[14] H. Lin, Z. Sun, Y. Yang, and S. Welleck. Lean-star: Learning to interleave thinking and proving. 
arXiv preprint arXiv:2407.10040, 2024." + ], + "bbox": [ + 115, + 205, + 885, + 876 + ], + "page_idx": 12 + }, + { + "type": "page_number", + "text": "13", + "bbox": [ + 487, + 922, + 509, + 935 + ], + "page_idx": 12 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[15] Y. Lin, S. Tang, B. Lyu, J. Wu, H. Lin, K. Yang, J. Li, M. Xia, D. Chen, S. Arora, et al. Goedelprover: A frontier model for open-source automated theorem proving. arXiv preprint arXiv:2502.07640, 2025.", + "[16] Z. Liu, C. Chen, W. Li, P. Qi, T. Pang, C. Du, W. S. Lee, and M. Lin. Understanding r1-zero-like training: A critical perspective. arXiv preprint arXiv:2503.20783, 2025.", + "[17] mathlib4. The math library of lean 4, 2025. URL https://github.com/leanprover-community/mathlib4.", + "[18] L. d. Moura and S. Ullrich. The lean 4 theorem prover and programming language. In International Conference on Automated Deduction, 2021.", + "[19] L. C. Paulson. Isabelle: A generic theorem prover. Springer, 1994.", + "[20] S. Polu and I. Sutskever. Generative language modeling for automated theorem proving. arXiv preprint arXiv:2009.03393, 2020.", + "[21] S. Polu, J. M. Han, K. Zheng, M. Baksys, I. Babuschkin, and I. Sutskever. Formal mathematics statement curriculum learning. arXiv preprint arXiv:2202.01344, 2022.", + "[22] R. Rafailov, A. Sharma, E. Mitchell, C. D. Manning, S. Ermon, and C. Finn. Direct preference optimization: Your language model is secretly a reward model. Advances in Neural Information Processing Systems (NeurIPS), 2023.", + "[23] Z. Shao, P. Wang, Q. Zhu, R. Xu, J. Song, X. Bi, H. Zhang, M. Zhang, Y. Li, Y. Wu, et al. Deepseekmath: Pushing the limits of mathematical reasoning in open language models. arXiv preprint arXiv:2402.03300, 2024.", + "[24] Q. Team. Qwen2.5: A party of foundation models, September 2024. URL https://qwen.lm.github.io/blog/qwen2.5/.", + "[25] Q. Team. Qwq-32b: Embracing the power of reinforcement learning, March 2025. 
URL https://qwenlm.github.io/blog/qwq-32b/.", + "[26] T. H. Trinh, Y. Wu, Q. V. Le, H. He, and T. Luong. Solving olympiad geometry without human demonstrations. Nature, 625(7995):476-482, 2024.", + "[27] Z. Wan, Y. Li, Y. Song, H. Wang, L. Yang, M. Schmidt, J. Wang, W. Zhang, S. Hu, and Y. Wen. Rema: Learning to meta-think for llms with multi-agent reinforcement learning. arXiv preprint arXiv:2503.09501, 2025.", + "[28] R. Wang, J. Zhang, Y. Jia, R. Pan, S. Diao, R. Pi, and T. Zhang. Theoremlama: Transforming general-purpose llms into lean4 experts. arXiv preprint arXiv:2407.03203, 2024.", + "[29] R. Wang, R. Pan, Y. Li, J. Zhang, Y. Jia, S. Diao, R. Pi, J. Hu, and T. Zhang. Ma-lot: Multiagent lean-based long chain-of-thought reasoning enhances formal theorem proving. arXiv preprint arXiv:2503.03205, 2025.", + "[30] Z. Wu, S. Huang, Z. Zhou, H. Ying, J. Wang, D. Lin, and K. Chen. Internl m2.5-Stepprover: Advancing automated theorem proving via expert iteration on large-scale lean problems. arXiv preprint arXiv:2410.15700, 2024.", + "[31] Z. Wu, J. Wang, D. Lin, and K. Chen. Lean-github: Compiling github lean repositories for a versatile lean prover. arXiv preprint arXiv:2407.17227, 2024." + ], + "bbox": [ + 114, + 99, + 884, + 898 + ], + "page_idx": 13 + }, + { + "type": "page_number", + "text": "14", + "bbox": [ + 487, + 923, + 509, + 935 + ], + "page_idx": 13 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[32] H. Xin, D. Guo, Z. Shao, Z. Ren, Q. Zhu, B. Liu, C. Ruan, W. Li, and X. Liang. Deepseek-prover: Advancing theorem proving in llms through large-scale synthetic data. arXiv preprint arXiv:2405.14333, 2024.", + "[33] H. Xin, Z. Ren, J. Song, Z. Shao, W. Zhao, H. Wang, B. Liu, L. Zhang, X. Lu, Q. Du, et al. Deepseek-prover-v1. 5: Harnessing proof assistant feedback for reinforcement learning and monte-carlo tree search. arXiv preprint arXiv:2408.08152, 2024.", + "[34] R. Xin, C. Xi, J. Yang, F. Chen, H. Wu, X. Xiao, Y. Sun, S. 
Zheng, and K. Shen. Bfs-prover: Scalable best-first tree search for llm-based automatic theorem proving. arXiv preprint arXiv:2502.03438, 2025.", + "[35] K. Yang, A. Swope, A. Gu, R. Chalamala, P. Song, S. Yu, S. Godil, R. J. Prenger, and A. Anandkumar. Leandojo: Theorem proving with retrieval-augmented language models. Advances in Neural Information Processing Systems (NeurIPS), 2023.", + "[36] H. Ying, Z. Wu, Y. Geng, J. Wang, D. Lin, and K. Chen. Lean workbook: A large-scale lean problem set formalized from natural language math problems. arXiv preprint arXiv:2406.03847, 2024.", + "[37] W. Zeng, Y. Huang, Q. Liu, W. Liu, K. He, Z. Ma, and J. He. Simplerl-zoo: Investigating and taming zero reinforcement learning for open base models in the wild. arXiv preprint arXiv:2503.18892, 2025.", + "[38] X. Zhao, W. Wu, J. Guan, and L. Kong. Promptcot: Synthesizing olympiad-level problems for mathematical reasoning in large language models. arXiv preprint arXiv:2503.02324, 2025.", + "[39] K. Zheng, J. M. Han, and S. Polu. Minif2f: a cross-system benchmark for formal olympiad-level mathematics. arXiv preprint arXiv:2109.00110, 2021." + ], + "bbox": [ + 114, + 99, + 885, + 531 + ], + "page_idx": 14 + }, + { + "type": "page_number", + "text": "15", + "bbox": [ + 487, + 922, + 510, + 935 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "Appendix", + "text_level": 1, + "bbox": [ + 114, + 99, + 220, + 118 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "A. 
Formalizer Details", + "text_level": 1, + "bbox": [ + 114, + 130, + 337, + 146 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "We start with Qwen25-Coder-32B-Instruct (Hui et al., 2024) and use following instruct prompt to train the formalizer:", + "bbox": [ + 112, + 161, + 882, + 193 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "Formalizer Prompt", + "text_level": 1, + "bbox": [ + 421, + 212, + 574, + 224 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "Please translate the mathematical statement {informal_statement} into a theorem statement in Lean 4 code.", + "bbox": [ + 134, + 229, + 836, + 253 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "Please do not generate codes of proof or comment sentences (e.g., starting with '/-' or '-').", + "bbox": [ + 134, + 262, + 823, + 286 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "The Lean 4 codes are required to complete the 'statement' in the following text:", + "bbox": [ + 134, + 296, + 764, + 310 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "\" ' lean4", + "bbox": [ + 136, + 326, + 218, + 340 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "theorem lean_workbook 'statement' := by sorry", + "bbox": [ + 136, + 342, + 574, + 365 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "As shown in Table 3, the formalizer is targeted to translate the natural language statement to formal statement in Lean 4 codes. We take use of the 29.7K data released by Goedel-Prover (Lin et al., 2025), which provides pairs of informal statement and formal statement in each sample. We train the formalizer with a fixed learning rate $5 \\times 10^{-6}$ for 2 epochs. We verify the Compiling Correctness (CC) Test, and Faithfulness and Completeness (FC) Test by following the prompts in Goedel-Prover. 
As shown in Table 4, our formalizer performs similarly to the Formalizer A (Lin et al., 2025).", + "bbox": [ + 110, + 409, + 884, + 521 + ], + "page_idx": 15 + }, + { + "type": "table", + "img_path": "images/81ecbadfa92856e0289f8658a20571661770530cab22ef47491919d27bd74bce.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
Example 1Example 2
Informal StatementSolve for x in the given inequality: x2-2x-24<0Prove that ln(eπ) is equal to π.
Formalizer Outputtheorem lean_workbook (x : R): x^2 - 2*x - 24 < 0 ↔ x ∈ Set.Ioo (-4) 6 := by sorrytheorem lean_workbook : Real.log (Real.exp π) = π := by sorry
", + "bbox": [ + 115, + 532, + 884, + 694 + ], + "page_idx": 15 + }, + { + "type": "table", + "img_path": "images/3d92cbecdced18c0f702bb3e88a457e85c4a980d84f653daaa42c640ec2b117d.jpg", + "table_caption": [ + "Table 3 | Examples of formalizer inputs and outputs for two examples." + ], + "table_footnote": [], + "table_body": "
ModelCC Test (%)FC Test (%)
Formalizer A (Lin et al., 2025)76.748.1
Formalizer B (Lin et al., 2025)88.580.4
Ours Formalizer77.649.0
", + "bbox": [ + 252, + 739, + 746, + 821 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "Table 4 | Quality assessment of the formalized statement. \"CC\" refers to Compiling Correctness (CC) Test and \"FC\" refers to Faithfulness and Completeness (FC) Test.", + "bbox": [ + 112, + 831, + 882, + 864 + ], + "page_idx": 15 + }, + { + "type": "page_number", + "text": "16", + "bbox": [ + 487, + 923, + 509, + 935 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "B. Cognitive Behaviors Design", + "text_level": 1, + "bbox": [ + 115, + 99, + 428, + 118 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "B.1. Lean Completion Example", + "text_level": 1, + "bbox": [ + 115, + 131, + 381, + 149 + ], + "page_idx": 16 + }, + { + "type": "code", + "sub_type": "code", + "code_caption": [ + "Input:" + ], + "code_body": "Complete the following Lean 4 code with explanatory comments preceding each line of code: \n' \\(\\mathit{lean4}\\) \nimport Mathlib \nimport Aesop \nset_option maxHeartbeats O \nopen BigOperators Real Nat Topology Rat \n/\\~ Given \\(\\sin (\\frac{\\pi}{4} -x) = \\frac{3}{5}\\) , find the value of sin2x. Show that it is \\(\\backslash\\) frac{7}{25}. - / theorem sin_2x_value (x : R) (h : sin (Real.pi / 4 - x) = 3 / 5) : sin (2 * x) \\(= 7 / 25\\coloneqq\\) by \n/\\~ Given \\(\\backslash (\\backslash \\sin \\backslash \\text{left} (\\backslash \\text{frac} (\\backslash \\text{pi})\\{4\\} -\\text{x}\\backslash \\text{right}) = \\backslash \\text{frac} (3)\\{5\\} \\backslash)\\) , we need to find the value of \\(\\backslash (\\backslash \\sin 2x\\backslash)\\) . We start by using the trigonometric identity for the sine of a difference and the double-angle formula for sine. 
Specifically, we use the identity \\(\\backslash (\\backslash \\sin (a - b) = \\backslash \\sin a\\backslash \\cos b - \\backslash \\cos a\\backslash \\sin b\\backslash)\\) and the double-angle formula \\(\\backslash (\\backslash \\sin 2x = 2\\backslash \\sin x\\backslash \\cos x\\backslash)\\) . 1. Substitute \\(\\backslash (\\backslash \\sin \\backslash \\text{left} (\\backslash \\text{frac} (\\backslash \\text{pi})\\{4\\} -\\text{x}\\backslash \\text{right}) = \\backslash \\text{frac} (3)\\{5\\} \\backslash)\\) into the identity for the sine of a difference: [ \\\\sin \\(\\backslash\\) left(\\frac{\\frac{\\pi}{4}}{4}\\)-x\\right) \\(=\\) \\\\sin \\(\\backslash\\) left(\\frac{\\frac{\\pi}{4}}{4}\\)-x\\right) \\(=\\) \\\\sin \\(\\backslash\\) left(a-b) \\(=\\) \\\\sin a\\cos b- cos a\\sin b) and the double-angle formula \\(\\backslash (\\backslash \\sin 2x = 2\\backslash \\sin x\\backslash \\cos x\\backslash)\\) \n] \n2. Calculate \\(\\backslash (\\backslash \\sin \\backslash \\text{left} (\\backslash \\text{frac} (\\backslash \\text{pi})\\{4\\} -\\text{x}\\backslash \\text{right}) = \\backslash \\text{frac} (3)\\{5\\} \\backslash)\\) into \\\\left(\\frac{\\frac{\\pi}{4}}{4}\\)-x\\right) and \\\\( (\\cos \\\\left(\\frac{\\frac{\\pi}{4}}{4}\\)-x\\right)\\\\left(\\frac{\\frac{\\pi}{4}}{4}\\)-x\\right)\\\\left(\\frac{\\frac{\\pi}{4}}{4}\\)-x\\right)\\\\left(\\frac{\\frac{\\pi}{4}}{4}\\)-x\\right)\\\\left(\\frac{\\frac{\\pi}{4}}{4}\\)-x\\right)\\\\left(\\frac{\\frac{\\pi}{4}}{4}\\)-x\\right)\\\\left(\\frac{\\pi}{4}\\)-x\\right)\\\\left(\\frac{\\pi}{4}\\)-x\\right)\\\\left(\\frac{\\pi}{4}\\)-x\\right)\\\\left(\\frac{\\pi}{4}\\)-x\\right)\\\\left(\\frac{\\pi}{4}\\)-x\\right)\\\\left(\\frac{\\pi}{4}\\)-x\\right)\\\\left(\\frac{\\pi}{4}\\)-x\\right)\\\\left(\\frac{\\pi}{4} -x\\right)\\\\left(\\frac{\\pi}{4} -x\\right)\\\\left(\\frac{\\pi}{4} -x\\right)\\\\left(\\frac{\\pi}{4} -x\\right)\\\\left(\\frac{\\pi}{4} -x\\right)\\\\left(\\frac{\\pi}{4} 
-x\\right)\\\\left(\\frac{\\pi}{4} -x\\right)\\\\left(\\frac{\\pi}{4} -x", + "guess_lang": "txt", + "bbox": [ + 114, + 180, + 882, + 894 + ], + "page_idx": 16 + }, + { + "type": "page_number", + "text": "17", + "bbox": [ + 487, + 922, + 509, + 935 + ], + "page_idx": 16 + }, + { + "type": "code", + "sub_type": "code", + "code_caption": [], + "code_body": "\\sin 2x = 2 \\sin \\left(\\text {left} (\\text {x} \\text {right}) \\cos \\left(\\text {left} (\\text {x} \\text {right}) \\right.\\right) \n\\] \n7. Substitute $\\backslash (\\backslash \\cos \\backslash \\text {left} (\\text {x} \\backslash \\text {right}) - \\backslash \\sin \\backslash \\text {left} (\\text {x} \\backslash \\text {right}) = \\backslash \\operatorname{frac}\\{3$ $\\backslash \\operatorname{sqrt}\\{2\\} \\} \\{5\\} \\backslash)$ into the equation: \n\\[\n\\begin{array}{l}\n\\backslash [ \\\\\n\\sin 2x = 2 \\backslash \\sin \\backslash \\text {left} (\\text {x} \\backslash \\text {right}) \\cos \\backslash \\text {left} (\\text {x} \\backslash \\text {right}) = 2 \\backslash \\operatorname{left} (\\backslash \\operatorname{frac}\\{3\n\\} \\backslash \\operatorname{sqrt}\\{2\\}) \\{5\\} \\backslash \\text {right}) \\backslash \\operatorname{left} (\\backslash \\operatorname{frac}\\{\\backslash \\operatorname{sqrt}\\{2\\}\\} \\{2\\} \\backslash \\text {right}) = 2 \\backslash \\operatorname{cdot} \\backslash \\operatorname{frac}\\{3\n\\} \\backslash \\operatorname{sqrt}\\{2\\}) \\{5\\} \\backslash \\operatorname{cdot} \\backslash \\operatorname{frac}\\{\\backslash \\operatorname{sqrt}\\{2\\}\\} \\{2\\} = \\backslash \\operatorname{frac}\\{3 \\backslash \\operatorname{sqrt}\\{2\\}\\} \\{5\\} \\backslash \\operatorname{cdot}\n\\end{array}\n\\] \n $\\backslash$ \n8. 
Simplify the final expression: \n\\[\n\\begin{array}{l}\n\\backslash [ \\\\\n\\sin 2x = \\backslash \\operatorname{frac}\\{7\\} \\{25\\}\n\\end{array}\n\\]", + "guess_lang": "latex", + "bbox": [ + 115, + 99, + 884, + 317 + ], + "page_idx": 17 + }, + { + "type": "code", + "sub_type": "code", + "code_caption": [ + "Wrong Proof:" + ], + "code_body": "-- Use the trigonometric identity for the sine of a difference and the double-angle formula for sine. \nsimp [Real.sin_sub, Real.sin_two.mul, Real.cos_two.mul, Real.sin(pi_div_four, Real.cos(pi_div_four] at h $\\vdash$ \n-- Simplify the expression using field operations. \nfield_simp at h $\\vdash$ \n-- Normalize the expression by expanding and simplifying it. \nring_nf at h $\\vdash$ \n-- Use non-linear arithmetic to verify the final result. \nnlinarith [Real.sin_sq_add_cos_sq x, Real.sin_le_one x, Real.cos_le_one x]", + "guess_lang": "txt", + "bbox": [ + 115, + 356, + 884, + 502 + ], + "page_idx": 17 + }, + { + "type": "code", + "sub_type": "code", + "code_caption": [ + "Tactic Error Messages:" + ], + "code_body": "linarith failed to find a contradiction \ncase h1.h \nx: $\\mathbb{R}$ \nh: $\\sqrt{2} * x.\\cos * 5 - \\sqrt{2} * x.\\sin * 5 = 6$ \nat: x.cos * x.sin * 50 < 7 \n $\\vdash$ False \nfailed", + "guess_lang": "txt", + "bbox": [ + 114, + 541, + 499, + 645 + ], + "page_idx": 17 + }, + { + "type": "code", + "sub_type": "code", + "code_caption": [ + "Searched Valid Proof:" + ], + "code_body": "-- Use the trigonometric identity for the sine of a difference and the double-angle formula for sine. \nsimp [Real.sin_sub, Real.sin_two.mul, Real.cos_two.mul, Real.sin(pi_div_four, Real.cos(pi_div_four] at h $\\vdash$ \n-- Simplify the expression using field operations. \nfield_simp at h $\\vdash$ \n-- Normalize the expression by expanding and simplifying it. \nring_nf at h $\\vdash$ \n-- Use the Pythagorean identity to simplify further. 
\nnlinarith [Real.sin_sq_add_cos_sq x, Real.sqrt_nonneg 2, sq_sqrt (show $0 \\leq$ 2 by norm_num)]\"", + "guess_lang": "sql", + "bbox": [ + 115, + 686, + 884, + 844 + ], + "page_idx": 17 + }, + { + "type": "code", + "sub_type": "code", + "code_caption": [ + "Claude CoTs:" + ], + "code_body": "", + "guess_lang": "txt", + "bbox": [ + 115, + 887, + 186, + 900 + ], + "page_idx": 17 + }, + { + "type": "page_number", + "text": "18", + "bbox": [ + 487, + 922, + 509, + 935 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "let's verify this proof to make sure it's working correctly. I need to check each step of the proof to ensure it's valid and identify any potential issues.", + "bbox": [ + 114, + 101, + 865, + 145 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "First, let's examine what we're trying to prove. It appears to be a trigonometric identity involving sine and cosine functions.", + "bbox": [ + 114, + 158, + 769, + 189 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "The proof begins with:", + "bbox": [ + 114, + 200, + 329, + 223 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "simp [Real.sin_sub, Real.sin_two.mul, Real.cos_two.mul, Real.sin(pi_div_four, Real.cos(pi_div_four] at h $\\vdash$", + "bbox": [ + 114, + 229, + 860, + 267 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "This step seems reasonable - we're using standard trigonometric identities to simplify both the hypothesis $h$ and the goal.", + "bbox": [ + 114, + 272, + 863, + 302 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "Next, we have:", + "bbox": [ + 114, + 315, + 253, + 337 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "field_simp at h", + "bbox": [ + 114, + 343, + 280, + 366 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "This tactic simplifies expressions involving field operations, which is appropriate for real-valued trigonometric expressions.", + "bbox": [ + 114, + 370, + 805, + 401 + ], + 
"page_idx": 18 + }, + { + "type": "text", + "text": "Then:", + "bbox": [ + 114, + 414, + 164, + 437 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "ring_nf at h", + "bbox": [ + 114, + 443, + 253, + 464 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "This normalizes ring expressions, which should help simplify our algebraic expressions further.", + "bbox": [ + 114, + 470, + 835, + 500 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "Finally, we have:", + "bbox": [ + 114, + 514, + 280, + 536 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "nlinarith [Real.sin_sq_add_cos_sq x, Real.sin_le_one x, Real.cos_le_one x]", + "bbox": [ + 114, + 541, + 831, + 565 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "This is where I need to be careful. The 'nlinarith' tactic is trying to use non-linear arithmetic with the Pythagorean identity $\\sin^2 (x) + \\cos^2 (x) = 1'$ and the bounds on sine and cosine functions.", + "bbox": [ + 114, + 583, + 868, + 627 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "The issue might be that we're using bounds ('Real.sin_le_one x, Real.cos_le_one x') rather than equalities. When dealing with trigonometric identities, we often need exact relationships rather than just bounds. The Pythagorean identity is an equality, but we're also including inequalities in our reasoning.", + "bbox": [ + 114, + 640, + 840, + 714 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "A better approach would be to focus on using the Pythagorean identity directly, possibly with additional facts about square roots. The `sqrt(2)` in our expressions suggests we might need properties about square roots, such as `sq_sqrt` to handle expressions like `(\\sqrt{2})^{\\sim}`.", + "bbox": [ + 114, + 726, + 865, + 785 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "Additionally, we should ensure that we're providing all the necessary facts to the arithmetic solver. 
For instance, we might need to explicitly include that '\\sqrt{2} \\geq 0'\\), which could be done with 'Real.sqrt_nonneg \\(2'\\).", + "bbox": [ + 114, + 797, + 878, + 841 + ], + "page_idx": 18 + }, + { + "type": "page_number", + "text": "19", + "bbox": [ + 487, + 922, + 509, + 935 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "B.2. Rewriting Example", + "text_level": 1, + "bbox": [ + 115, + 101, + 319, + 118 + ], + "page_idx": 19 + }, + { + "type": "code", + "sub_type": "code", + "code_caption": [ + "Input:" + ], + "code_body": "Complete the following Lean 4 code with explanatory comments preceding each line of code: \n```\n```\n```lean4\nimport Mathlib\nimport Aesop\nset_option maxHeartbeats 0\nopen BigOperators Real Nat Topology Rat\n-- Prove that if $n \\geq 2$ , then $n$ distinct integers can be given such that the sum of any two of them is divisible by their difference.-/\ntheorem lean_workbook : ∀ n : N, 2 ≤ n → ∃ l : List Z, l.length = n ∧ ∀ x y, x ∈ l ∧ y ∈ l ∧ x ≠ y → (x + y) % (x - y) = 0 := by", + "guess_lang": "txt", + "bbox": [ + 114, + 148, + 882, + 368 + ], + "page_idx": 19 + }, + { + "type": "code", + "sub_type": "code", + "code_caption": [ + "Wrong Proof:" + ], + "code_body": "- We need to prove that for any natural number $(n \\backslash \\text{geq} 2)$ , there exists a list of $(n \\backslash)$ distinct integers such that the sum of any two integers in the list is divisible by their difference. \n1. **Base Case**: When $(n = 2)$ , we can choose the list $([0, 1])$ . For any $(x \\backslash)$ and $(y \\backslash)$ in this list, if $(x \\backslash \\text{neq} y)$ , then $(x + y) \\mod (x - y) = 0$ . \n2. **Inductive Step**: Assume the statement holds for some $(n \\backslash \\text{geq} 2)$ . We need to show it holds for $(n + 1)$ . By the inductive hypothesis, there exists a list of $(n \\backslash)$ distinct integers $(1 \\backslash)$ such that the sum of any two elements in $(1 \\backslash)$ is divisible by their difference. 
We can add one more integer to this list and show that the new list still satisfies the condition. \n- / \nintro n hn \n-- Use a base case for $n = 2$ \nuse [0, 1] \n-- Verify that the base case satisfies the conditions \nsimp \n-- Use Aesop to handle the general case \n<;> aesop", + "guess_lang": "latex", + "bbox": [ + 114, + 403, + 882, + 708 + ], + "page_idx": 19 + }, + { + "type": "code", + "sub_type": "code", + "code_caption": [ + "Revised Comments and Proofs:" + ], + "code_body": "/\\-We need to prove that for any natural number \\(\\backslash\\) (n\\geq 2 \\), there exists a list of \\(\\backslash\\) (n \\) distinct integers such that the sum of any two integers in the list is divisible by their difference. \n1. \\*\\*Base Case**: When \\(\\backslash\\) ( \\(n = 2\\backslash\\) ), we can choose the list \\(\\backslash ([0,2]\\backslash)\\). For any \\(\\backslash\\) (x \\) and \\(\\backslash\\) y \\) in this list, if \\(\\backslash\\) (x\\neq y \\), then \\(\\backslash\\) (x + y) mod (x - y) = 0 \\). For example, \\(\\backslash\\) (0 + 2) mod (0 - 2) = 2 \\mod -2 = 0 \\). \n2. \\*\\*Inductive Step**: Assume the statement holds for some \\(\\backslash\\) (n\\geq 2 \\). We need to show it holds for \\(\\backslash\\) (n + 1 \\). By the inductive hypothesis, there", + "guess_lang": "txt", + "bbox": [ + 114, + 743, + 882, + 904 + ], + "page_idx": 19 + }, + { + "type": "page_number", + "text": "20", + "bbox": [ + 487, + 923, + 509, + 935 + ], + "page_idx": 19 + }, + { + "type": "code", + "sub_type": "code", + "code_caption": [], + "code_body": "exists a list of \\(\\backslash\\) (n \\(\\backslash\\)) distinct integers \\(\\backslash\\) (\\{a_1, a_2, \\ldots, a_n\\} \\}) such that the sum of any two elements in this list is divisible by their difference. We can add one more integer to this list to form a new list of \\(\\backslash\\) (n + 1 \\(\\backslash\\)) integers. A good choice for the new integer is \\(\\backslash\\) (a_n+1 \\() =\\) a_n + (n+1)! \\)). 
This ensures that for any \\(\\backslash\\) (i \\(\\backslash\\)) and \\(\\backslash\\) (j \\(\\backslash\\)) in the range \\(\\backslash\\) (1 \\(\\backslash\\) leq i, j \\(\\backslash\\) leq n+1 \\(\\backslash\\)) with \\(\\backslash\\) (i \\(\\backslash\\) neq j \\(\\backslash\\)), the sum \\(\\backslash\\) (a_i + a_j \\(\\backslash\\)) is divisible by their difference \\(\\backslash\\) (a_i - a_j \\(\\backslash\\)). This is because \\(\\backslash\\) (a_i + a_j) - (a_i - a_j) = 2a_j \\), and since \\(\\backslash\\) (a_j \\(\\backslash\\)) is an integer, \\(\\backslash\\) (2a_j \\(\\backslash\\)) is always divisible by \\(\\backslash\\) (a_i - a_j \\)). Thus, the list \\(\\backslash\\) (\\{a_1, a_2, \\ldots, a_ldots, a_n, a_{\\{n+1\\}}\\} \\}) satisfies the condition, completing the inductive step. \n- \n- Introduce the natural number \\(n\\) and the assumption that \\(n \\geq 2\\) \nintro n hn \n- Use a construction to construct the list of integers \nuse List.replicate n 0 \n- Simplify the conditions given the assumption \\(n \\geq 2\\) \nsimp [hn, List.length, List.mem_replicate, List.mem_range, List.mem_filter] \n- Use Aesop to complete the proof \n<;> aesop\"", + "guess_lang": "txt", + "bbox": [ + 115, + 99, + 884, + 406 + ], + "page_idx": 20 + }, + { + "type": "text", + "text": "Claude CoTs:", + "text_level": 1, + "bbox": [ + 112, + 420, + 216, + 434 + ], + "page_idx": 20 + }, + { + "type": "text", + "text": "Let me verify my proof for this problem about distinct integers with divisibility properties...", + "bbox": [ + 115, + 443, + 776, + 472 + ], + "page_idx": 20 + }, + { + "type": "text", + "text": "Looking at my current solution, I'm trying to prove that for any $n \\geq 2$ , there exist $n$ distinct integers where the sum of any two is divisible by their difference.", + "bbox": [ + 115, + 486, + 865, + 527 + ], + "page_idx": 20 + }, + { + "type": "text", + "text": "Wait, I see an issue here. 
I've used '[0, 1]' as my example list, but let me check if this actually satisfies the condition:", + "bbox": [ + 115, + 542, + 855, + 570 + ], + "page_idx": 20 + }, + { + "type": "text", + "text": "- For $x = 0$ and $y = 1$ : $(x + y) \\% (x - y) = (0 + 1) \\% (0 - 1) = 1 \\% (-1) = 0$ ?", + "bbox": [ + 115, + 571, + 853, + 586 + ], + "page_idx": 20 + }, + { + "type": "text", + "text": "Let me verify this calculation. We have $1\\%$ (-1) which should be the remainder when 1 is divided by -1. Since -1 divides 1 as $-1 \\times (-1) = 1$ , the remainder is 0. So this seems correct.", + "bbox": [ + 115, + 599, + 840, + 640 + ], + "page_idx": 20 + }, + { + "type": "text", + "text": "But wait, there's another issue. I've only constructed a list for $n = 2$ , but my proof needs to work for any $n \\geq 2$ . The line:", + "bbox": [ + 115, + 656, + 853, + 692 + ], + "page_idx": 20 + }, + { + "type": "text", + "text": "use [0, 1]", + "bbox": [ + 115, + 699, + 213, + 721 + ], + "page_idx": 20 + }, + { + "type": "text", + "text": "only provides a list of length 2, but I need to handle cases where $n > 2$ as well.", + "bbox": [ + 115, + 727, + 843, + 755 + ], + "page_idx": 20 + }, + { + "type": "text", + "text": "I need to generalize my construction. Perhaps I should create a list of length $n$ instead of just assuming the base case will handle everything. The 'simp' and 'aesop' tactics might not be able to automatically construct this list for arbitrary $n$ .", + "bbox": [ + 115, + 770, + 836, + 827 + ], + "page_idx": 20 + }, + { + "type": "text", + "text": "Let me reconsider this approach... What if I use a more general construction? I could use 'List.replicate n 0' to create a list of $\\mathbf{n}$ zeros. 
But then all elements would be the same, and the problem requires distinct integers.", + "bbox": [ + 115, + 841, + 867, + 884 + ], + "page_idx": 20 + }, + { + "type": "page_number", + "text": "21", + "bbox": [ + 487, + 922, + 507, + 935 + ], + "page_idx": 20 + }, + { + "type": "text", + "text": "Actually, I should think more carefully about what list would satisfy the divisibility condition. If I have integers $a$ and $b$ where $a \\neq b$ , then I need $(a + b) \\% (a - b) = 0$ .", + "bbox": [ + 114, + 101, + 833, + 146 + ], + "page_idx": 21 + }, + { + "type": "text", + "text": "For the general case, I might need to use a more sophisticated construction like arithmetic sequences or something based on factorials to ensure the divisibility property holds.", + "bbox": [ + 114, + 158, + 848, + 202 + ], + "page_idx": 21 + }, + { + "type": "text", + "text": "The issue with my current proof is that I'm not providing a general construction for $n$ elements - I'm only handling $n = 2$ . And even if 'aesop' could somehow extend this, I haven't given it enough guidance on how to construct the larger list.", + "bbox": [ + 114, + 215, + 867, + 274 + ], + "page_idx": 21 + }, + { + "type": "text", + "text": "To fix this, I should:", + "bbox": [ + 114, + 286, + 329, + 300 + ], + "page_idx": 21 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "1. Either provide an explicit construction for arbitrary $n$", + "2. Or use induction properly, showing how to extend from $n$ to $n + 1$ elements" + ], + "bbox": [ + 112, + 300, + 833, + 330 + ], + "page_idx": 21 + }, + { + "type": "text", + "text": "I think I need to reconsider what list would work for this problem. Maybe something like powers of 2, or a list where consecutive elements have a specific relationship? 
The factorial approach mentioned in my comment might be promising - adding elements like $\\mathbf{a}_1$ , $\\mathbf{a}_2$ , ..., $\\mathbf{a}_n$ where each new element is constructed to maintain the divisibility relationship with all previous elements.", + "bbox": [ + 114, + 342, + 858, + 429 + ], + "page_idx": 21 + }, + { + "type": "text", + "text": "I should try constructing a list like $[0, 1!, 2!, \\ldots, (n-1)!!]$ or something similar and verify that it satisfies the divisibility condition. This would give me distinct integers with potentially useful divisibility properties.", + "bbox": [ + 114, + 442, + 836, + 501 + ], + "page_idx": 21 + }, + { + "type": "text", + "text": "C. RL Details", + "text_level": 1, + "bbox": [ + 112, + 536, + 258, + 552 + ], + "page_idx": 21 + }, + { + "type": "text", + "text": "Figure 6 shows the overall distribution of valid proofs in pass@32 testing. We collect the distribution base on our Leanabell-Prover-GD-SFT model.", + "bbox": [ + 112, + 568, + 882, + 600 + ], + "page_idx": 21 + }, + { + "type": "page_number", + "text": "22", + "bbox": [ + 487, + 922, + 509, + 935 + ], + "page_idx": 21 + }, + { + "type": "image", + "img_path": "images/777becb88cf144ee0b188a5eb4a011a795525f9e8a7316c04c0b6494231fdd17.jpg", + "image_caption": [ + "Figure 6 | Distribution of valid proofs in pass@32. We calculate this distribution based on the Leanabell-Prover-GD-SFT model. To build this set, we remain the formal statements who has at least sampled proofs are valid by Lean 4 compiler." 
+ ], + "image_footnote": [], + "bbox": [ + 122, + 326, + 873, + 607 + ], + "page_idx": 22 + }, + { + "type": "page_number", + "text": "23", + "bbox": [ + 487, + 923, + 507, + 934 + ], + "page_idx": 22 + } +] \ No newline at end of file diff --git a/data/2025/2504_06xxx/2504.06122/1f4b320c-f80c-4d99-b14d-3a49e20634f9_model.json b/data/2025/2504_06xxx/2504.06122/1f4b320c-f80c-4d99-b14d-3a49e20634f9_model.json new file mode 100644 index 0000000000000000000000000000000000000000..13df354c9f6764b457687c1a21cec580e2e09e72 --- /dev/null +++ b/data/2025/2504_06xxx/2504.06122/1f4b320c-f80c-4d99-b14d-3a49e20634f9_model.json @@ -0,0 +1,2897 @@ +[ + [ + { + "type": "aside_text", + "bbox": [ + 0.023, + 0.298, + 0.061, + 0.702 + ], + "angle": 270, + "content": "arXiv:2504.06122v3 [cs.AI] 14 Jul 2025" + }, + { + "type": "title", + "bbox": [ + 0.13, + 0.134, + 0.87, + 0.159 + ], + "angle": 0, + "content": "Leanabell-Prover: Posttraining Scaling in Formal Reasoning" + }, + { + "type": "text", + "bbox": [ + 0.221, + 0.189, + 0.778, + 0.224 + ], + "angle": 0, + "content": "Jingyuan Zhang, Qi Wang, Xingguang Ji, Yahui Liu, Yang Yue, Fuzheng Zhang, Di Zhang, Guorui Zhou, Kun Gai" + }, + { + "type": "text", + "bbox": [ + 0.408, + 0.238, + 0.593, + 0.255 + ], + "angle": 0, + "content": "Kuaishou Technology" + }, + { + "type": "title", + "bbox": [ + 0.449, + 0.297, + 0.551, + 0.315 + ], + "angle": 0, + "content": "Abstract" + }, + { + "type": "text", + "bbox": [ + 0.112, + 0.34, + 0.886, + 0.562 + ], + "angle": 0, + "content": "Recent advances in automated theorem proving (ATP) through LLMs have highlighted the potential of formal reasoning with Lean 4 codes. However, ATP has not yet been revolutionized by the recent posttraining scaling as demonstrated by Open AI O1/O3 and Deepseek R1. In this work, we investigate the entire posttraining of ATP, aiming to align it with breakthroughs in reasoning models in natural languages. 
To begin, we continual train current ATP models with a hybrid dataset, which consists of numerous statement-proof pairs, and additional data aimed at incorporating cognitive behaviors that emulate human reasoning and hypothesis refinement. Next, we explore reinforcement learning with the use of outcome reward returned by Lean 4 compiler. Through our designed continual training and reinforcement learning processes, we have successfully improved existing formal provers, including both DeepSeek-Prover-v1.5 and Goedel-Prover, achieving state-of-the-art performance in the field of whole-proof generation. For example, we achieve a \\(59.8\\%\\) pass rate (pass@32) on MiniF2F. This is an on-going project and we will progressively update our findings, release our data and training details." + }, + { + "type": "image", + "bbox": [ + 0.236, + 0.58, + 0.761, + 0.787 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.112, + 0.802, + 0.885, + 0.869 + ], + "angle": 0, + "content": "Figure 1 | Benchmark performance on MiniF2F-test (Zheng et al., 2021). Our method boosts both the two baseline models after employing RL training. Goedel-Prover-RL is our implementation. Our framework surpasses DeepSeek-Prover-v1.5-RL and Goedel-Prover-SFT \\(6.6\\%\\) and \\(2.2\\%\\), respectively." + }, + { + "type": "footer", + "bbox": [ + 0.114, + 0.921, + 0.813, + 0.94 + ], + "angle": 0, + "content": "\\(^{\\text{念}}\\)Equal contributions, and order alphabetically by first name. \\({}^{\\dagger}\\)Corresponding author." + } + ], + [ + { + "type": "title", + "bbox": [ + 0.116, + 0.101, + 0.273, + 0.117 + ], + "angle": 0, + "content": "1. 
Introduction" + }, + { + "type": "text", + "bbox": [ + 0.116, + 0.132, + 0.885, + 0.293 + ], + "angle": 0, + "content": "Recent large language models (LLMs), such as Open AI O1/O3 and Deepseek R1, which are enhanced by posttraining scaling, emerge with numerous powerful and intriguing reasoning behaviors (Guo et al., 2025; Anthropic, 2025; Team, 2025). Such LLMs have shown impressive performance in solving math problems with natural language. However, the long chain-of-thoughts (CoTs) and final answers in natural language (NL) are substantially challenging for peer review (Wang et al., 2024), especially for theorem proving. Meanwhile, the key advantage of formal languages lies in their verifiability—each reasoning step can be validated by formal theorem verifiers, e.g., Lean (De Moura et al., 2015; Moura and Ullrich, 2021) and Isabelle (Paulson, 1994). As a promising direction, automated theorem proving (ATP) with formal languages (FL) has attracted booming attention from the community of large language models (LLMs)." + }, + { + "type": "text", + "bbox": [ + 0.116, + 0.301, + 0.885, + 0.461 + ], + "angle": 0, + "content": "Contrary to solving math problems with natural language, generating proofs using Lean 4 codes (or other formal languages) is more challenging. For example, DeepSeek-Prover v1.5-RL (Xin et al., 2024) achieves only \\(50.0\\%\\) (pass@32) on the Olympiad-level mathematics benchmark MiniF2F (Zheng et al., 2021). However, DeepSeek-R1 (Guo et al., 2025) can achieve \\(100\\%\\) on the same math problems, but in natural language. DeepSeek-Prover-v1.5 (Lin et al., 2025) and STP (Dong and Ma, 2025) show that using extensive synthetic dataset of formal statements and expert iteration (Polu et al., 2022) can boost the whole-proof prover. 
Meanwhile, some methods (Yang et al., 2023; Wu et al., 2024; Xin et al., 2025) scale up the search budget (e.g., more than 2 million in BFS-Prover (Xin et al., 2025)) for step-wise tactic generation, which seems extremely computational." + }, + { + "type": "text", + "bbox": [ + 0.116, + 0.47, + 0.885, + 0.582 + ], + "angle": 0, + "content": "Although RL strategies have already proven their effectiveness in natural language for math problem solving, the performance in formal reasoning has been rather ordinary so far. We find that only Deepseek-Prover releases it RL version, and DeepSeek-Prover-v1.5-RL marginally improves \\(1.8\\%\\) than its supervised fine-tuned model. Compared to the success of reinforcement learning (RL) in natural language reasoning, the potential for improvement in formal language reasoning may still be vast. However, replicating the current successful RL training approaches, which primarily focus on the Qwen2.5 model series, is not straightforward." + }, + { + "type": "text", + "bbox": [ + 0.116, + 0.59, + 0.885, + 0.767 + ], + "angle": 0, + "content": "To verify the posttraining scaling in ATP, we begin with the standard whole-proof generation models DeepSeek-Prover-v1.5-SFT and Goedel-Prover-SFT. There are three training stages in our optimization framework. We first collect public and synthetic data to continue training. We also utilize automatic synthetic CoT data to embed the self-reflection capabilities, such as backtracking (abandoning failing approaches) and verification (systematic error-checking), to the fine-tuned model. Next, we employ the GRPO algorithm (Shao et al., 2024) to perform reinforcement learning from proof assistant feedback (RLPAF) on the supervised fine-tuned model. Similar to DeepSeek-Prover-v1.5-RL, the verification results from the Lean compiler serve as reward supervision. 
After analyzing the validation results on benchmarks, we find our posttraining strategies can effectively boost the overall performance on MiniF2F (Zheng et al., 2021) benchmark." + }, + { + "type": "text", + "bbox": [ + 0.145, + 0.776, + 0.514, + 0.791 + ], + "angle": 0, + "content": "In summary, here are our main contributions:" + }, + { + "type": "text", + "bbox": [ + 0.141, + 0.806, + 0.88, + 0.869 + ], + "angle": 0, + "content": "- We continue train current APT models with more high quality statement-proof data pairs. More importantly, we design synthetic data to enhance the models' self-reflection capabilities, enabling us to pilot cognitive behaviors in our models before applying the RL algorithm." + }, + { + "type": "text", + "bbox": [ + 0.141, + 0.871, + 0.88, + 0.902 + ], + "angle": 0, + "content": "- We investigate the RL training to boost the ATP prover that generates whole mathematical proofs in Lean 4 codes. During training, we employ the Lean 4 verifier to serve as a reward" + }, + { + "type": "list", + "bbox": [ + 0.141, + 0.806, + 0.88, + 0.902 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.924, + 0.505, + 0.935 + ], + "angle": 0, + "content": "2" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.161, + 0.103, + 0.265, + 0.117 + ], + "angle": 0, + "content": "supervision." + }, + { + "type": "text", + "bbox": [ + 0.141, + 0.118, + 0.884, + 0.15 + ], + "angle": 0, + "content": "- The proposed Leanabelle-Prover achieves state-of-the-art performance through our meticulously designed strategy, \\(59.8\\%\\) (pass@32) on MiniF2F-test." + }, + { + "type": "text", + "bbox": [ + 0.142, + 0.15, + 0.885, + 0.199 + ], + "angle": 0, + "content": "- Currently, we collect around 1.52M formal statements, and 0.22M formal statements with detailed informal CoTs and verified proofs. All intermediate models and training data are released to the community1." 
+ }, + { + "type": "list", + "bbox": [ + 0.141, + 0.118, + 0.885, + 0.199 + ], + "angle": 0, + "content": null + }, + { + "type": "title", + "bbox": [ + 0.114, + 0.223, + 0.284, + 0.24 + ], + "angle": 0, + "content": "2. Related Work" + }, + { + "type": "text", + "bbox": [ + 0.113, + 0.255, + 0.884, + 0.306 + ], + "angle": 0, + "content": "**Lean4 Theorem Proving using LLMs.** With the rapid progress of LLMs, research has explored applying LLMs in FL reasoning to automate theorem proving. Prior research can be briefly classified into two strategies, namely proof-step generation and whole-proof generation." + }, + { + "type": "text", + "bbox": [ + 0.112, + 0.311, + 0.886, + 0.506 + ], + "angle": 0, + "content": "Proof-step generation methods train an LLM agent to iteratively generate proof steps by predicting the next tactic based on the current proof state (Polu and Sutskever, 2020; Polu et al., 2022; Lample et al., 2022; Azerbayev et al., 2023; Yang et al., 2023; Lin et al., 2024; DeepMind, 2024; Trinh et al., 2024; Wu et al., 2024; Xin et al., 2024; Li et al., 2024; Xin et al., 2025). These methods apply FL executor to verify after each step of generation and is able to discover some non-trivial proofs. For example, LeanDojo (Yang et al., 2023) first establishes relationship models between various tactic states within proofs. It then retrieves relevant premises from the mathematical library based on the current output state (as collected from a Lean verifier) and inputs these premises into an encoder-decoder model to generate the subsequent tactic. Employing Monte-Carlo tree search (MCTS) (Coulom, 2006) is another common solution in this field. However, as the complexity of the proof increases, tree search methods become computationally expensive and lack high-level NL planning to control the overall structure of the proof (Wang et al., 2025)." 
+ }, + { + "type": "text", + "bbox": [ + 0.112, + 0.512, + 0.884, + 0.675 + ], + "angle": 0, + "content": "Whole-proof generation methods treat theorem proving as a kind of code generation problem, where LLMs generate the entire proof in a single attempt by using supervised training or prompt engineering (Xin et al., 2024; Lin et al., 2025; Dong and Ma, 2025; Wang et al., 2025). This approach leverages the NL reasoning and high-level planning capabilities of LLMs with predictable computation costs, but lacks intermediate feedback from FL executors. Thus, the core challenge for improving whole-proof generation is that there are no sufficient Lean 4 codes to eliminate the gaps between NL and FL modalities. However, generating such data requires high levels of expertise, making it difficult to scale. As a result, the generated proofs often lack post-hoc analysis of errors and tend to perform badly on tedious questions that require non-trivial solutions." + }, + { + "type": "text", + "bbox": [ + 0.112, + 0.699, + 0.884, + 0.862 + ], + "angle": 0, + "content": "Reinforcement Learning for Lean4 Theorem Proving. There are two typical solutions to utilize RL for Lean4 Theorem Proving. In DeepSeek-Prover-v1.5-RL (Xin et al., 2024), the authors employ GRPO algorithm and takes the feedback signals from Lean 4 verifier as reward that reveals the proofs verified as correct or wrong. Such methods only uses the compilation feedback from the entire proof process as the reward result. In this paper, we employ the whole-proof generation approach, so we continue with this same solution. In contrast, Xin et al. (2025) use DPO (Rafailov et al., 2023) to refine the policy LLM by leveraging preference pairs naturally generated during tree search such as MCTS (Coulom, 2006). Therefore, the second solution utilizes the tactic state of each step during the compilation process. However, the effectiveness of existing methods still need improvement." 
+ }, + { + "type": "page_footnote", + "bbox": [ + 0.14, + 0.888, + 0.573, + 0.902 + ], + "angle": 0, + "content": "1https://github.com/Leanabell-LM/Leanabell-Prover" + }, + { + "type": "page_number", + "bbox": [ + 0.493, + 0.923, + 0.506, + 0.936 + ], + "angle": 0, + "content": "3" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.112, + 0.102, + 0.888, + 0.233 + ], + "angle": 0, + "content": "Cognitive Behaviors Gandhi et al. (2025) first reveal that models without integrating human-like reasoning abilities (e.g., verification, backtracking, subgoal setting and backward chaining) are hard to obtain significant improvements by directly applying RL algorithms. Similarly, Wan et al. (2025) propose that LLMs lack a specialized design for acquiring meta-thinking, resulting in low efficacy. Zeng et al. (2025) and Liu et al. (2025) propose that the training may directly start from the base models with such cognitive behaviors—a paradigm referred to as zero RL training. Considering the barely satisfactory performance of RL strategies in the formal reasoning field, we have reasons to suspect whether this is due to this fundamental reason." + }, + { + "type": "title", + "bbox": [ + 0.114, + 0.255, + 0.304, + 0.274 + ], + "angle": 0, + "content": "3. Model Training" + }, + { + "type": "title", + "bbox": [ + 0.114, + 0.289, + 0.318, + 0.306 + ], + "angle": 0, + "content": "3.1. Continual Training" + }, + { + "type": "text", + "bbox": [ + 0.112, + 0.315, + 0.886, + 0.412 + ], + "angle": 0, + "content": "Base Model. We begin with the previous whole-proof generation models DeepSeek-Prover v1.5-SFT (Xin et al., 2024) and Goedel-Prover (Lin et al., 2025) that are two well-trained versions after the supervised fine-tuning stage. Both of these two models are with 7 billion parameters. Specifically, the two models are trained with proofs added detailed explanatory informal comments. 
Therefore, the model possesses the basic ability to align natural language descriptions with Lean 4 codes." + }, + { + "type": "image", + "bbox": [ + 0.12, + 0.424, + 0.877, + 0.742 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.113, + 0.755, + 0.886, + 0.789 + ], + "angle": 0, + "content": "Figure 2 | Distributions of math domains in various Lean 4 dataset. Lean Workbook, Goedel-Prover, STP Lean and NuminaMath are training set. MiniF2F and ProofNet are test set." + }, + { + "type": "text", + "bbox": [ + 0.113, + 0.825, + 0.886, + 0.875 + ], + "angle": 0, + "content": "Statement Formalization. Similar to Lin et al. (2025), we train a formalizer, based on Qwen2.5-32B-Coder-Instruct (Hui et al., 2024), to convert the olympiad-level math problems in natural language into formal statements. We collect the formal and informal statement pairs sourced" + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.924, + 0.506, + 0.936 + ], + "angle": 0, + "content": "4" + } + ], + [ + { + "type": "table", + "bbox": [ + 0.263, + 0.099, + 0.737, + 0.236 + ], + "angle": 0, + "content": "
Data SourceFSFS+IC+PF
Lean Workbook (Ying et al., 2024)140K40K
STP-Lean (Dong and Ma, 2025)400K36K
NuminaMath (Li et al., 2024)520K97K
AoPS (AoPS)370K26K
PromptCoT (Zhao et al., 2025)90K20K
Total1.52M0.22M
" + }, + { + "type": "table_caption", + "bbox": [ + 0.113, + 0.245, + 0.884, + 0.296 + ], + "angle": 0, + "content": "Table 1 | Distribution of our training sources, including released data by existing models and synthetic data from informal math problems. FS, PF and IC refer to formal statements, proofs and informal comments, respectively." + }, + { + "type": "text", + "bbox": [ + 0.112, + 0.318, + 0.885, + 0.451 + ], + "angle": 0, + "content": "from Goedel-Prover \\(^{2}\\) (around 30K). The training details of our formalizer are presented in Appendix A. With the trained formalizer, we convert the natural language statement and then verify with Lean 4 compiler. After these procedures, we finally get 520K formal statements from NuminaMath (Li et al., 2024) and 370K formal statements from AoPS (AoPS). Recently, Zhao et al. (2025) proposed to utilize LLMs to synthesize math problems through mathematical concepts. Based on the data analysis in Figure 2, we employ the PromptCoT framework to synthesize math problems at different levels, including AMC, AIME and USAMO, on three majority math domains (i.e., algebra, number theory and calculus)." + }, + { + "type": "text", + "bbox": [ + 0.112, + 0.474, + 0.884, + 0.669 + ], + "angle": 0, + "content": "Data Curation. We develop a comprehensive Lean 4 code completion dataset for the supervised fine-tuning, as shown in Table 1. These theorems are sourced from various projects, such as the standard Lean 4 math library Mathlib4 (mathlib4, 2025), Lean Workbook (Ying et al., 2024), synthetic theorems from Goedel-Prover (Lin et al., 2025) and STP (Dong and Ma, 2025). Besides the above-mentioned public data, we use deepseek-prover and goedel-prover to sample proofs that can be correctly verified by Lean 4 compiler. In this manner, we collect Lean data from NuminaMath and AoPS. Similarly, we formalize around 90K math problems synthesized through PromptCoT. 
In DeepSeek-Prover-v1.5 (Xin et al., 2024), the authors claim that incorporating natural language reasoning before generating theorem proof code can eliminate the gap between problem solving strategies in natural language and theorem proving in Lean. Thus, we also collect comments for part of the data. Finally, we collected around 1.52M formal statements and 0.22M statements with detailed informal comments and verified proofs." + }, + { + "type": "text", + "bbox": [ + 0.112, + 0.675, + 0.884, + 0.758 + ], + "angle": 0, + "content": "As shown in Figure 2, we find that the data distributions of Lean Workbook, Goedel-Prover, STP Lean and NuminaMath cover well the MiniF2F test set. On the contrast, there is an obvious domain bias for ProofNet, which is also revealed by (Lin et al., 2025). It indicates a promising direction for further improvement by expanding data in specific mathematical domains. For fairness, we do not adjust the distributions of the training set in our optimizations." + }, + { + "type": "text", + "bbox": [ + 0.112, + 0.782, + 0.884, + 0.864 + ], + "angle": 0, + "content": "Implementation After obtaining a large collection of formalized statements with proofs, we continuously train once. We use the lightweight framework SWIFT3 for the supervised fine-tuning (SFT). The SFT experiment is trained on 8 NVIDIA H100 GPUs with the following hyperparameters: a learning rate of \\(5 \\times 10^{-5}\\), a global batch size of 32 over 2 epochs, and a weight decay coefficient of 0.1." 
+ }, + { + "type": "page_footnote", + "bbox": [ + 0.138, + 0.874, + 0.696, + 0.889 + ], + "angle": 0, + "content": "\\(^{2}\\)https://huggingface.co/datasets/Goedel-LM/Lean-workbook-proofs" + }, + { + "type": "page_footnote", + "bbox": [ + 0.141, + 0.889, + 0.484, + 0.902 + ], + "angle": 0, + "content": "3https://github.com/modelscope/ms-swift" + }, + { + "type": "list", + "bbox": [ + 0.138, + 0.874, + 0.696, + 0.902 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.493, + 0.923, + 0.506, + 0.936 + ], + "angle": 0, + "content": "5" + } + ], + [ + { + "type": "title", + "bbox": [ + 0.114, + 0.102, + 0.427, + 0.119 + ], + "angle": 0, + "content": "3.2. Integrating Cognitive Behaviors" + }, + { + "type": "text", + "bbox": [ + 0.113, + 0.128, + 0.885, + 0.195 + ], + "angle": 0, + "content": "Gandhi et al. (2025) have revealed the importance of cognitive behaviors in base models before utilizing RL strategies. Following a similar idea, we induce reflection-like behaviors through specially designed interventions. We propose two kinds of CoT templates to integrate the self-reflection capabilities: Lean completion and rewriting." + }, + { + "type": "text", + "bbox": [ + 0.112, + 0.219, + 0.885, + 0.333 + ], + "angle": 0, + "content": "Lean Completion. We sample 5K formal statements used in our previous continue training stage, which are not \\(100\\%\\) correct in the pass@16 sampling. We find the position where the first error tactic appears, and re-sample for completion. Once collecting new valid proofs, we can use the following prompt to ask Claude (Anthropic, 2025) to generate the reflection response. We observe that an effective reflection process can only be achieved by combining incorrect proofs, correct proofs, and tactic error messages. Thus, the feedback collected from Lean verifier is also used to create the prompt. An example is presented in Section B.1." 
+ }, + { + "type": "code", + "bbox": [ + 0.117, + 0.351, + 0.88, + 0.773 + ], + "angle": 0, + "content": "Lean Completion Prompt (Claude) \n# Initial Proof \n' \\(\\text{巧} ^ { \\prime }\\) lean4 \n{old_code} \n# Lean Feedback \n{error} \n# Correct Proof \n' \\(\\text{巧} ^ { \\prime }\\) lean4 \n{new_code} \nYour task is to generate a reflection of a Lean4 proof as follows: 1. You are provided with a lean proof code that failed to complete the proof, the verify feedback, and a revised correct proof. 2. You need to act as a verifier to check the code step by step and point out where the code fails with incorrect tactics. 3. Provide an alternative method, such as those in the correct proof. 4. Act as you are verifying your own proof.. Here are some rules you need to follow: 1. At the beginning, you should start with a conjunction phrase such as 'let's verify' and claim you need to verify the proof. 2. Instead of directly pointing out the issue, your answer should show the process to identify the incorrect step. 3. Do not refer to Lean Feedback, Correct Proof, or anything that shows you have already known the issue before your reflection. 4. Do not provide any new Lean4 code block, you don't need to write a correct proof. 5. Do not include a summary section. 6. Again, do not refer to Lean Feedback, Correct Proof, do not write anything like 'as shown in the correct proof'. Now, start with a conjunction phrase and require you need to check the proof, do not directly claim there is an issue." + }, + { + "type": "text", + "bbox": [ + 0.113, + 0.799, + 0.885, + 0.898 + ], + "angle": 0, + "content": "Rewriting. Based on the above-mentioned Lean completion, there are two main steps in the rewriting strategy. First, we suspect that the generation of incorrect proofs is, to some extent, due to the incorrect problem-solving comments being generated. 
Therefore, we introduce Qwen2.5-72B-instruct (Team, 2024) to evaluate the problem-solving comments and then regenerate the correct problem-solving comments. Second, we provide Claude with both the invalid and newly rewritten valid Lean 4 code to generate comprehensive Chains of Thought (CoTs) that explain" + }, + { + "type": "page_number", + "bbox": [ + 0.493, + 0.925, + 0.506, + 0.936 + ], + "angle": 0, + "content": "6" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.113, + 0.102, + 0.881, + 0.134 + ], + "angle": 0, + "content": "the reasoning process. In this manner, we collect 19K samples with CoTs (See the detailed examples in Appendix B.2). Here are the prompt templates for these two steps:" + }, + { + "type": "code_caption", + "bbox": [ + 0.319, + 0.153, + 0.678, + 0.166 + ], + "angle": 0, + "content": "Rewriting - Step 1 (Qwen2.5-72B-Instruct)" + }, + { + "type": "code", + "bbox": [ + 0.13, + 0.169, + 0.856, + 0.239 + ], + "angle": 0, + "content": "You are an experienced mathematics evaluation teacher. You will be provided with a math problem and the corresponding solution idea.. \nPlease determine whether the solution idea is correct. If it is, please output \"Correct\", otherwise please output \"Incorrect\". If the solution idea is incorrect, please provide the correct solution idea, and the output of the solution idea should be included within \\*\\* and \\*\\*." + }, + { + "type": "code", + "bbox": [ + 0.136, + 0.249, + 0.39, + 0.262 + ], + "angle": 0, + "content": "The output format is as follows:" + }, + { + "type": "code", + "bbox": [ + 0.136, + 0.271, + 0.593, + 0.284 + ], + "angle": 0, + "content": "1. Judgement: Incorrect. Solution: “‘‘Solution idea’’”" + }, + { + "type": "code", + "bbox": [ + 0.137, + 0.284, + 0.327, + 0.295 + ], + "angle": 0, + "content": "2. Judgement: Correct." 
+ }, + { + "type": "list", + "bbox": [ + 0.136, + 0.271, + 0.593, + 0.295 + ], + "angle": 0, + "content": null + }, + { + "type": "code", + "bbox": [ + 0.136, + 0.305, + 0.296, + 0.34 + ], + "angle": 0, + "content": "[math problem start] \n{problem} \n[math problem end]" + }, + { + "type": "code", + "bbox": [ + 0.136, + 0.35, + 0.303, + 0.385 + ], + "angle": 0, + "content": "[solution idea start] \n{solution} \n[solution idea end]" + }, + { + "type": "text", + "bbox": [ + 0.113, + 0.42, + 0.885, + 0.468 + ], + "angle": 0, + "content": "With these synthesized data, we employ our second-stage continual training, with a learning rate of \\(5 \\times 10^{-5}\\) and overall batch size of 1024 for one epoch. Finally, we obtain the model, named as Leanabell-Prover-SFT." + }, + { + "type": "title", + "bbox": [ + 0.114, + 0.491, + 0.361, + 0.507 + ], + "angle": 0, + "content": "3.3. Reinforcement Learning" + }, + { + "type": "text", + "bbox": [ + 0.113, + 0.517, + 0.884, + 0.598 + ], + "angle": 0, + "content": "We integrate reinforcement learning (RL) with the Lean 4 theorem prover to automate the discovery of valid proofs. The RL agent interacts with the Lean 4 environment, generating whole proofs and receiving feedback from Lean 4 compiler as reward signals. The agent's objective is to maximize cumulative rewards by learning to generate syntactically correct, logically valid proofs for an input formal statement." + }, + { + "type": "text", + "bbox": [ + 0.113, + 0.623, + 0.885, + 0.72 + ], + "angle": 0, + "content": "Policy Optimization Algorithms. We employ the recent GRPO (Shao et al., 2024) as our RL algorithm. For each input formal statement \\( q \\), GRPO samples a group of outputs \\( \\{o_1, o_2, \\dots, o_G\\} \\) from the old policy \\( \\pi_{\\theta_{old}} \\) and then collect the feedback \\( \\{\\tau_1, \\tau_2, \\dots, \\tau_G\\} \\) for the group of responses through Lean 4 compiler. 
According to each feedback status \\( \\tau_i \\), we assign a particular reward. Then, the advantage of the \\( i \\)-th output is calculated by normalizing the group-level rewards \\( \\{R_1, R_2, \\dots, R_G\\} \\):" + }, + { + "type": "equation", + "bbox": [ + 0.397, + 0.718, + 0.884, + 0.756 + ], + "angle": 0, + "content": "\\[\n\\hat {A} _ {i, t} = \\frac {r _ {i} - \\operatorname {m e a n} \\left(\\left\\{R _ {i} \\right\\} _ {i = 1} ^ {G}\\right)}{\\operatorname {s t d} \\left(\\left\\{R _ {i} \\right\\} _ {i = 1} ^ {G}\\right)}. \\tag {1}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.114, + 0.76, + 0.776, + 0.777 + ], + "angle": 0, + "content": "Finally, we optimizes the policy model \\(\\pi_{\\theta}\\) by maximizing the following objective:" + }, + { + "type": "equation", + "bbox": [ + 0.217, + 0.786, + 0.884, + 0.854 + ], + "angle": 0, + "content": "\\[\n\\begin{array}{l} \\mathcal {J} _ {\\mathrm {G R P O}} (\\theta) = \\mathbb {E} _ {(q, a) \\sim \\mathcal {D}, \\{o _ {i} \\} _ {i = 1} ^ {G} \\sim \\pi_ {\\theta_ {\\mathrm {o l d}}} (\\cdot | q)} \\\\ \\left. 
\\right.\\left[ \\frac {1}{G} \\sum_ {i = 1} ^ {G} \\frac {1}{| o _ {i} |} \\sum_ {t = 1} ^ {| o _ {i} |} \\left(\\min \\left(r _ {i, t} (\\theta) \\hat {A} _ {i, t}, \\operatorname {c l i p} \\left(r _ {i, t} (\\theta), 1 - \\varepsilon , 1 + \\varepsilon\\right) \\hat {A} _ {i, t}\\right)\\right)\\right], \\tag {2} \\\\ \\end{array}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.114, + 0.861, + 0.171, + 0.874 + ], + "angle": 0, + "content": "where" + }, + { + "type": "equation", + "bbox": [ + 0.391, + 0.871, + 0.884, + 0.907 + ], + "angle": 0, + "content": "\\[\nr _ {i, t} (\\theta) = \\frac {\\pi_ {\\theta} \\left(o _ {i , t} \\mid q , o _ {i , < t}\\right)}{\\pi_ {\\theta_ {\\mathrm {o l d}}} \\left(o _ {i , t} \\mid q , o _ {i , < t}\\right)}, \\tag {3}\n\\]" + }, + { + "type": "page_number", + "bbox": [ + 0.493, + 0.924, + 0.505, + 0.935 + ], + "angle": 0, + "content": "7" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.113, + 0.102, + 0.888, + 0.135 + ], + "angle": 0, + "content": "\\(\\varepsilon\\) is a hyperparameter. In our experiments, we set \\(\\varepsilon = 0.2\\). Notably, we do not use the Kullback-Leibler (KL) divergence penalty." 
+ }, + { + "type": "title", + "bbox": [ + 0.379, + 0.155, + 0.619, + 0.169 + ], + "angle": 0, + "content": "Rewriting - Step 2 (Claude)" + }, + { + "type": "text", + "bbox": [ + 0.136, + 0.174, + 0.235, + 0.184 + ], + "angle": 0, + "content": "Wrong code" + }, + { + "type": "text", + "bbox": [ + 0.136, + 0.185, + 0.202, + 0.195 + ], + "angle": 0, + "content": "\" ' lean4" + }, + { + "type": "text", + "bbox": [ + 0.137, + 0.197, + 0.218, + 0.207 + ], + "angle": 0, + "content": "lean code1" + }, + { + "type": "text", + "bbox": [ + 0.137, + 0.208, + 0.163, + 0.216 + ], + "angle": 0, + "content": "," + }, + { + "type": "text", + "bbox": [ + 0.136, + 0.23, + 0.251, + 0.24 + ], + "angle": 0, + "content": "Correct code" + }, + { + "type": "text", + "bbox": [ + 0.137, + 0.241, + 0.202, + 0.251 + ], + "angle": 0, + "content": "\" ' lean4" + }, + { + "type": "text", + "bbox": [ + 0.137, + 0.253, + 0.22, + 0.263 + ], + "angle": 0, + "content": "lean code2" + }, + { + "type": "text", + "bbox": [ + 0.137, + 0.264, + 0.162, + 0.271 + ], + "angle": 0, + "content": "," + }, + { + "type": "text", + "bbox": [ + 0.136, + 0.286, + 0.823, + 0.31 + ], + "angle": 0, + "content": "I have given you with two Lean4 code solutions to the same problem. The first solution fails to compile in Lean4, while the second solution compiles successfully." + }, + { + "type": "text", + "bbox": [ + 0.137, + 0.31, + 0.264, + 0.32 + ], + "angle": 0, + "content": "Your task is to:" + }, + { + "type": "text", + "bbox": [ + 0.137, + 0.321, + 0.78, + 0.332 + ], + "angle": 0, + "content": "1. Act as a verification assistant and carefully compare these two code snippets." + }, + { + "type": "text", + "bbox": [ + 0.137, + 0.332, + 0.821, + 0.353 + ], + "angle": 0, + "content": "2. Identify the specific errors or flawed strategies in the first solution that caused compilation failure." + }, + { + "type": "text", + "bbox": [ + 0.137, + 0.354, + 0.829, + 0.376 + ], + "angle": 0, + "content": "3. 
Explain the reasoning process that would lead someone from the incorrect approach to the correct solution." + }, + { + "type": "list", + "bbox": [ + 0.137, + 0.321, + 0.829, + 0.376 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.135, + 0.387, + 0.86, + 0.434 + ], + "angle": 0, + "content": "When analyzing the code, please simulate the thought process of someone examining their own proof. Begin sections of your analysis with phrases like \"Let's verify my proof...\", \"Wait, I see an issue here...\", or \"Let me reconsider this approach...\" This should demonstrate how someone might catch and correct their own mistakes." + }, + { + "type": "text", + "bbox": [ + 0.135, + 0.443, + 0.838, + 0.479 + ], + "angle": 0, + "content": "The analysis emphasizes conceptual understanding over syntax details, explaining the fundamental logical or strategic errors in the initial solution and demonstrating how the corrected solution properly addresses these conceptual problems." + }, + { + "type": "text", + "bbox": [ + 0.136, + 0.489, + 0.421, + 0.5 + ], + "angle": 0, + "content": "Please structure your response with:" + }, + { + "type": "text", + "bbox": [ + 0.136, + 0.501, + 0.588, + 0.511 + ], + "angle": 0, + "content": "- Identification of specific errors in the first solution." + }, + { + "type": "text", + "bbox": [ + 0.136, + 0.512, + 0.635, + 0.522 + ], + "angle": 0, + "content": "- Explanation of the conceptual issues that led to these errors." + }, + { + "type": "text", + "bbox": [ + 0.136, + 0.523, + 0.848, + 0.544 + ], + "angle": 0, + "content": "- How to fix the conceptual problems in error so as to generate the problem-solving idea of the second solution?" 
+ }, + { + "type": "list", + "bbox": [ + 0.136, + 0.501, + 0.848, + 0.544 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.135, + 0.555, + 0.854, + 0.658 + ], + "angle": 0, + "content": "Do not provide any new Lean4 code beyond what I've given you - focus exclusively on analyzing the provided code. Don't include the phased titles in the output results, such as \"Identification of Specific Errors in the First Solution\", \"Conceptual Issues That Led to These Errors\", etc. Also, don't use expressions like \"the first solution\" or \"the second solution\". Use \"current solution\" to represent \"first solution\". Although you used the second solution for auxiliary analysis, avoid revealing in your response that you've seen its content. For example, refrain from saying things like 'I noticed that in the new solution.' Instead, respond as if you're thinking independently, based solely on the first solution." + }, + { + "type": "text", + "bbox": [ + 0.113, + 0.689, + 0.885, + 0.833 + ], + "angle": 0, + "content": "Reward Function. Unlike stepwise rewards, the feedback is provided only after the full proof is compiled by Lean 4 verifier in our experiments. Our rewards are derived from: (1) Terminal reward \\( R_{\\text{success}} \\): a scalar reward granted if the entire proof is validated successfully by Lean 4 verifier. (2) Penalty \\( R_{\\text{fail}} \\): a negative reward for proofs with critical errors (e.g., type mismatches, infinite loops, unsolved goals and etc). Moreover, we observe that there are warnings in the feedback, such as some unnecessary or redundant steps have no negative effects on the final validation. In our experiments, we ignore warning cases as long as the compilation and verification process passes successfully. 
So, given the feedback \\( \\tau \\) from Lean 4 compiler, our final reward function can be formulated as:" + }, + { + "type": "equation", + "bbox": [ + 0.284, + 0.843, + 0.884, + 0.885 + ], + "angle": 0, + "content": "\\[\nR (\\tau) = \\left\\{ \\begin{array}{l l} R _ {\\text {s u c c e s s}} & \\text {i f L e a n 4 f u l l y v a l i d a t e s} \\tau \\\\ R _ {\\text {f a i l}} & \\text {o t h e r w i s e (s y n t a x e r r o r s / t i m e o u t)} \\end{array} \\right. \\tag {4}\n\\]" + }, + { + "type": "page_number", + "bbox": [ + 0.493, + 0.924, + 0.506, + 0.936 + ], + "angle": 0, + "content": "8" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.117, + 0.101, + 0.886, + 0.23 + ], + "angle": 0, + "content": "Implementation We conduct RL training based on the Leanabell-Prover-SFT. We use a constant learning rate of \\(1 \\times 10^{-6}\\). For each theorem, we sample a group of 32 candidate proofs, with maximal rollout length set to 8192. The training global batch size is set to \\(32 \\times 32 = 1024\\). On the RL training data, we select samples from those whose number of validations in Pass@32 falls within the range of [2, 16]. We believe this subset of data has a certain level of difficulty while providing exploration space, making it effective for updating the policy model. The detailed distribution of pass@32 is presented in Figure 6 in Appendix C. Finally, we obtain the RL version model named as Leanabell-Prover-RL." + }, + { + "type": "title", + "bbox": [ + 0.117, + 0.255, + 0.273, + 0.274 + ], + "angle": 0, + "content": "4. Experiments" + }, + { + "type": "text", + "bbox": [ + 0.117, + 0.286, + 0.886, + 0.335 + ], + "angle": 0, + "content": "Benchmarks We follow the previous work (Lin et al., 2025; Xin et al., 2024; Wang et al., 2024) and mainly validate the effectiveness of our proposed method on the most commonly-used MiniF2F-test (Zheng et al., 2021)." 
+ }, + { + "type": "text", + "bbox": [ + 0.117, + 0.361, + 0.886, + 0.62 + ], + "angle": 0, + "content": "Results on MiniF2F. We have two versions of our models posttrained from two strong prover models: Deepseek-Prover-v1.5-SFT and Goedel-Prover-SFT, namely Leanabell-Prover-DS and Leanabell-Prover-GD. We mainly compare current whole proof generation methods, while ignore those with proof-step methods using far more inference-compute. As shown in Table 2, our posttraining framework boosts both DeepSeek-Prover-v1.5-SFT and Goedel-Prover-SFT models. On the minimum sample budget, Leanabell-Prover-GD-RL achieves the SOTA of MiniF2F (59.8% on pass@32), which gains up to 2.2% (i.e. from Goedel-Prover SFT, from 57.6% to 59.8%). We can observe that following our continual training phase, our method (Leanabell-Prover-DS-SFT) shows improvement over its base model (DeepSeek-Prover-v1.5-SFT), and the RL version continues to effectively enhance its performance. Meanwhile, Leanabell-Prover-GD-SFT performs almost identically to Leanabell-Prover-DS-SFT. This is reasonable, as Goedel-Prover-SFT is finetuned from DeepSeek-Pover-v1.5-base, with a significantly larger amount of data compared to our continual training stage. Therefore, our continual training on Leanabell-Prover-GD-SFT primarily adjusts the model's reasoning ability across different math domain distributions and incorporates the proper CoT format with cognitive behaviors into the current training data. This makes the checkpoint more conducive to RL training." + }, + { + "type": "text", + "bbox": [ + 0.117, + 0.626, + 0.886, + 0.772 + ], + "angle": 0, + "content": "We also increase the sampling budget to compare the performance gains. For the SFT models, as can be seen, the baseline DeepSeek-Prover-v1.5-SFT achieves around \\(2.2\\%\\) performance gains (i.e., \\(48.2\\%\\) to \\(50.4\\%\\)), as increasing sampling budget from 32 to 128. 
Within our configurations, our Leanabell-Prover-DS-SFT and Leanabell-Prover-GD-SFT models also achieve \\(1.8\\%\\) (i.e., \\(54.9\\%\\) to \\(56.7\\%\\)) and \\(1.2\\%\\) (i.e., \\(58.2\\%\\) to \\(59.4\\%\\)) performance gains on the same inference scaling experiments, respectively. For the RL models, DeepSeek-Prover-v1.5-RL achieves \\(1.6\\%\\) performance gains (i.e., \\(50.0\\%\\) to \\(51.6\\%\\)), while our Leanabell-Prover-DS-RL achieves more gains (i.e., \\(56.6\\%\\) to \\(59.0\\%\\)). Therefore, after the model has undergone SFT and RL training, our models still maintain the exploration capabilities." + }, + { + "type": "text", + "bbox": [ + 0.117, + 0.797, + 0.886, + 0.895 + ], + "angle": 0, + "content": "Exploration Abilities and RL. We first examine our two SFT models, with their pass@16 accuracy at different sampling temperatures. This metric can serve as an indicator of the policy's exploration ability and is particularly relevant for RL, as it reflects the policy's ability to generate responses that can achieve a positive reward. As shown in Figure 3, we find both SFT models are exploratory, and thus ready for RL. The RL training rewards are shown in Figure 4. During our experiments, we also compared the original GRPO with Dr. GRPO (Liu et al., 2025), and" + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.924, + 0.505, + 0.935 + ], + "angle": 0, + "content": "9" + } + ], + [ + { + "type": "table", + "bbox": [ + 0.202, + 0.201, + 0.797, + 0.709 + ], + "angle": 0, + "content": "
MethodSample budgetminiF2F-test
TheoremLlama [28]12833.6%
DeepSeek-Prover-v1 [32]12846.1% ± 0.5%
DeepSeek-Prover-v1.5-Base [33]12829.7% ± 0.5%
320039.2%
640042.2%
DeepSeek-Prover-v1.5-SFT [33]3248.2% ± 0.6%
6449.6% ± 0.7%
12850.4% ± 0.4%
320053.3% ± 0.5%
DeepSeek-Prover-v1.5-RL [33]3250.0% ± 0.5%
6450.7% ± 0.4%
12851.6% ± 0.5%
320054.9% ± 0.7%
STP [7]12857.7% ± 0.6%
320061.7% ± 0.6%
Goedel-Prover-SFT [15]3257.6% ± 0.7%
320062.7%
Leanabell-Prover-DS-SFT3254.9%
6455.3%
12856.7%
Leanabell-Prover-DS-RL3256.6%
6457.4%
12859.0%
Leanabell-Prover-GD-SFT3258.2%
6459.0%
12859.4%
Leanabell-Prover-GD-RL3259.8%
6460.7%
12861.1%
" + }, + { + "type": "table_caption", + "bbox": [ + 0.113, + 0.718, + 0.885, + 0.785 + ], + "angle": 0, + "content": "Table 2 | Comparison with state-of-the-art methods on the miniF2F-test dataset. The notation \\(\\mu \\pm \\sigma\\) denotes the average accuracy \\(\\mu\\) and the standard deviation \\(\\sigma\\). \"DS\" and \"GD\" refer to using the DeepSeek-Prover-v1.5-SFT and Goedel-Prover-SFT as base models to continue SFT and RL training, respectively." + }, + { + "type": "page_number", + "bbox": [ + 0.489, + 0.924, + 0.511, + 0.936 + ], + "angle": 0, + "content": "10" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.31, + 0.103, + 0.688, + 0.338 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.114, + 0.351, + 0.765, + 0.37 + ], + "angle": 0, + "content": "Figure 3 | Exploration ability: pass@16 measures how well base models explore." + }, + { + "type": "image", + "bbox": [ + 0.12, + 0.382, + 0.482, + 0.59 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.518, + 0.382, + 0.876, + 0.59 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.113, + 0.604, + 0.884, + 0.637 + ], + "angle": 0, + "content": "Figure 4 | Left: Reward curve during training Leanabell-Prover-Prover-DS-RL. Right: Reward curve during training Leanabell-Prover-Prover-GD-RL." + }, + { + "type": "text", + "bbox": [ + 0.113, + 0.661, + 0.885, + 0.856 + ], + "angle": 0, + "content": "found that the training dynamics remained largely consistent under these two RL training algorithms. This may be attributed to the fact that the length of different rollout responses, regardless of whether they are correct or incorrect, does not vary significantly in formal language reasoning. We have also observed that selecting an appropriate prompt set is crucial for RL training. Merely using pass@N as the sole criterion is insufficient to unlock the full potential of RL. 
As shown in Figure 5, we analyze the distributions of error problems across different source types in the MiniF2F-test set. We observed that, based on DeepSeek-Prover-v1.5-SFT, errors can be reduced across all data source types in MiniF2F-test set, especially for AMC, MATH, and CUSTOM. However, this improvement is significantly reduced in optimization results based on Goedel-Prover-SFT. This suggests that such as the intrinsic difficulty level of the statements (e.g., whether they are at the AIME or IMO level), the coverage of mathematical domains, and the balance with the prover model's capabilities, are also important." + }, + { + "type": "page_number", + "bbox": [ + 0.489, + 0.923, + 0.509, + 0.936 + ], + "angle": 0, + "content": "11" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.119, + 0.101, + 0.485, + 0.259 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.515, + 0.101, + 0.881, + 0.26 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.114, + 0.269, + 0.82, + 0.288 + ], + "angle": 0, + "content": "Figure 5 | Distribution of problem types that failed verification on the MiniF2F-test set." + }, + { + "type": "title", + "bbox": [ + 0.114, + 0.31, + 0.557, + 0.328 + ], + "angle": 0, + "content": "5. Conclusion, Limitation, and Future Work" + }, + { + "type": "text", + "bbox": [ + 0.113, + 0.341, + 0.885, + 0.455 + ], + "angle": 0, + "content": "We present a series of ATP models, named Leanabell-Proverseries, by investigating the posttraining scaling of current provers. Leanabell-Prover is started with DeepSeek-Prover-v1.5-SFT and Goedel-Prover-SFT which are two well-trained whole-proof generation models. We first collect a large collection of formalized statements with proofs to continue training. More importantly, we embed cognitive behaviors into the base models by applying a second-stage continual training on such synthetic data. 
With such prepared SFT models, we finally achieve the final performance through the RL optimization." + }, + { + "type": "text", + "bbox": [ + 0.113, + 0.48, + 0.885, + 0.626 + ], + "angle": 0, + "content": "Limitations. As we find that the base prover model (i.e., DeepSeek-Prover-v1.5) is a weak LLM compared to those used for posttraining scaling in natural languages, such as Deepseek v3 and the Qwen2.5 family. Although we have piloted to integrate cognitive behaviors into the model and selected the prompt set of RL according to the ability of our SFT models, the expected stronger RL performance has not fully materialized. Our findings right now are more in line with the replication on weak LLMs with RL (Liu et al., 2025; Zeng et al., 2025). Moreover, although we achieved performance gains, we observe that the integrated self-reflection capacities demonstrate a weakening trend after RL training. This suggests that effectively integrating cognitive behaviors into such weak base LLMs remains highly challenging." + }, + { + "type": "text", + "bbox": [ + 0.113, + 0.651, + 0.883, + 0.684 + ], + "angle": 0, + "content": "Future Work. Our goal is to fully invoke formal reasoning abilities, and thus we will continue to explore the following directions (hopefully can achieve some improvement):" + }, + { + "type": "text", + "bbox": [ + 0.141, + 0.702, + 0.884, + 0.862 + ], + "angle": 0, + "content": "- Bridging formal reasoning with natural languages. Reasoning in formal languages has natural benefits for theorem proving, including no mathematical knowledge hallucination, and all steps and states with verified feedback. However, reasoning abilities of current formal provers (including our current work) still lag behind those of natural language reasoning models. We have made the first step to design a CoT template and synthesize data accordingly, which is intended to insert natural language information to help formal reasoning. 
We also tried Deepseek R1 with prompting for formal proof generation, which achieves \\(51.6\\%\\) (pass@32) on MiniF2F-test. Therefore, we hope to develop more effective manners that can transfer the math knowledge and reasoning abilities in natural languages into formal proof generation." + }, + { + "type": "text", + "bbox": [ + 0.141, + 0.863, + 0.885, + 0.896 + ], + "angle": 0, + "content": "- Bridging whole proof generation with proof-step methods. We believe current RL framework can help bridging these two lines of methods. For example, we can replace the" + }, + { + "type": "list", + "bbox": [ + 0.141, + 0.702, + 0.885, + 0.896 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.489, + 0.924, + 0.511, + 0.936 + ], + "angle": 0, + "content": "12" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.159, + 0.101, + 0.885, + 0.152 + ], + "angle": 0, + "content": "sampling-based response-level rollout in RL into proof-step rollout for better exploration (but still rollout into a whole proof or reach the maximum length, then calculate the response-level reward), thus improving the learning efficiency of the RL training stage." + }, + { + "type": "title", + "bbox": [ + 0.116, + 0.175, + 0.23, + 0.192 + ], + "angle": 0, + "content": "References" + }, + { + "type": "ref_text", + "bbox": [ + 0.125, + 0.206, + 0.885, + 0.24 + ], + "angle": 0, + "content": "[1] Anthropic. Claude 3.7 Sonnet System card. 2025. URL https://www.anthropic.com/news/claudi-3-7-sonnet." + }, + { + "type": "ref_text", + "bbox": [ + 0.126, + 0.249, + 0.886, + 0.283 + ], + "angle": 0, + "content": "[2] AoPS. Art of problem solving. https://artofproblemsolving.com/. Accessed: [date]." + }, + { + "type": "ref_text", + "bbox": [ + 0.126, + 0.293, + 0.884, + 0.341 + ], + "angle": 0, + "content": "[3] Z. Azerbayev, H. Schoelkopf, K. Paster, M. D. Santos, S. McAleer, A. Q. Jiang, J. Deng, S. Biderman, and S. Welleck. LLemma: An open language model for mathematics. 
arXiv preprint arXiv:2310.10631, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.126, + 0.351, + 0.883, + 0.384 + ], + "angle": 0, + "content": "[4] R. Coulom. Efficient selectivity and backup operators in monte-carlo tree search. In International conference on computers and games, pages 72-83. Springer, 2006." + }, + { + "type": "ref_text", + "bbox": [ + 0.126, + 0.394, + 0.885, + 0.427 + ], + "angle": 0, + "content": "[5] L. De Moura, S. Kong, J. Avigad, F. Van Doorn, and J. von Raumer. The Lean theorem prover (system description). In International Conference on Automated Deduction (CAD), 2015." + }, + { + "type": "ref_text", + "bbox": [ + 0.126, + 0.437, + 0.881, + 0.469 + ], + "angle": 0, + "content": "[6] DeepMind. Alphaproof and Alphageometry, July 2024. URL https://deepmind.google.de/discover/blog/ai-solves-imo-problems-at-silver-medal-level/." + }, + { + "type": "ref_text", + "bbox": [ + 0.127, + 0.479, + 0.881, + 0.513 + ], + "angle": 0, + "content": "[7] K. Dong and T. Ma. Stp: Self-play llm theorem provers with iterative conjecturing and proving. arXiv preprint arXiv:2502.00212, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.126, + 0.523, + 0.883, + 0.571 + ], + "angle": 0, + "content": "[8] K. Gandhi, A. Chakravarthy, A. Singh, N. Lile, and N. D. Goodman. Cognitive behaviors that enable self-improving reasoners, or, four habits of highly effective stars. arXiv preprint arXiv:2503.01307, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.126, + 0.581, + 0.884, + 0.63 + ], + "angle": 0, + "content": "[9] D. Guo, D. Yang, H. Zhang, J. Song, R. Zhang, R. Xu, Q. Zhu, S. Ma, P. Wang, X. Bi, et al. Deepseek-R1: Incentivizing reasoning capability in llms via reinforcement learning. arXiv preprint arXiv:2501.12948, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.117, + 0.64, + 0.884, + 0.673 + ], + "angle": 0, + "content": "[10] B. Hui, J. Yang, Z. Cui, J. Yang, D. Liu, L. Zhang, T. Liu, J. Zhang, B. Yu, K. Dang, et al. Qwen2. 
5-coder technical report. arXiv preprint arXiv:2409.12186, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.117, + 0.683, + 0.882, + 0.732 + ], + "angle": 0, + "content": "[11] G. Lample, T. Lacroix, M.-A. Lachaux, A. Rodriguez, A. Hayat, T. Lavril, G. Ebner, and X. Martinet. Hypertree proof search for neural theorem proving. Advances in Neural Information Processing Systems (NeurIPS), 35, 2022." + }, + { + "type": "ref_text", + "bbox": [ + 0.117, + 0.742, + 0.885, + 0.775 + ], + "angle": 0, + "content": "[12] J. Li, E. Beeching, L. Tunstall, B. Lipkin, R. Soletskyi, S. C. Huang, K. Rasul, L. Yu, A. Jiang, Z. Shen, Z. Qin, B. Dong, L. Zhou, Y. Fleureau, G. Lample, and S. Polu. Numinamath, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.117, + 0.785, + 0.882, + 0.833 + ], + "angle": 0, + "content": "[13] Y. Li, D. Du, L. Song, C. Li, W. Wang, T. Yang, and H. Mi. Hunyuanprover: A scalable data synthesis framework and guided tree search for automated theorem proving. arXiv preprint arXiv:2412.20735, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.117, + 0.844, + 0.882, + 0.877 + ], + "angle": 0, + "content": "[14] H. Lin, Z. Sun, Y. Yang, and S. Welleck. Lean-star: Learning to interleave thinking and proving. arXiv preprint arXiv:2407.10040, 2024." + }, + { + "type": "list", + "bbox": [ + 0.117, + 0.206, + 0.886, + 0.877 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.489, + 0.923, + 0.51, + 0.936 + ], + "angle": 0, + "content": "13" + } + ], + [ + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.101, + 0.885, + 0.15 + ], + "angle": 0, + "content": "[15] Y. Lin, S. Tang, B. Lyu, J. Wu, H. Lin, K. Yang, J. Li, M. Xia, D. Chen, S. Arora, et al. Goedelprover: A frontier model for open-source automated theorem proving. arXiv preprint arXiv:2502.07640, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.117, + 0.16, + 0.883, + 0.192 + ], + "angle": 0, + "content": "[16] Z. Liu, C. Chen, W. Li, P. Qi, T. Pang, C. 
Du, W. S. Lee, and M. Lin. Understanding r1-zero-like training: A critical perspective. arXiv preprint arXiv:2503.20783, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.117, + 0.203, + 0.881, + 0.235 + ], + "angle": 0, + "content": "[17] mathlib4. The math library of lean 4, 2025. URL https://github.com/leanprover-community/mathlib4." + }, + { + "type": "ref_text", + "bbox": [ + 0.117, + 0.246, + 0.881, + 0.278 + ], + "angle": 0, + "content": "[18] L. d. Moura and S. Ullrich. The lean 4 theorem prover and programming language. In International Conference on Automated Deduction, 2021." + }, + { + "type": "ref_text", + "bbox": [ + 0.117, + 0.289, + 0.66, + 0.306 + ], + "angle": 0, + "content": "[19] L. C. Paulson. Isabelle: A generic theorem prover. Springer, 1994." + }, + { + "type": "ref_text", + "bbox": [ + 0.117, + 0.316, + 0.883, + 0.348 + ], + "angle": 0, + "content": "[20] S. Polu and I. Sutskever. Generative language modeling for automated theorem proving. arXiv preprint arXiv:2009.03393, 2020." + }, + { + "type": "ref_text", + "bbox": [ + 0.117, + 0.359, + 0.883, + 0.391 + ], + "angle": 0, + "content": "[21] S. Polu, J. M. Han, K. Zheng, M. Baksys, I. Babuschkin, and I. Sutskever. Formal mathematics statement curriculum learning. arXiv preprint arXiv:2202.01344, 2022." + }, + { + "type": "ref_text", + "bbox": [ + 0.117, + 0.402, + 0.883, + 0.45 + ], + "angle": 0, + "content": "[22] R. Rafailov, A. Sharma, E. Mitchell, C. D. Manning, S. Ermon, and C. Finn. Direct preference optimization: Your language model is secretly a reward model. Advances in Neural Information Processing Systems (NeurIPS), 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.117, + 0.46, + 0.883, + 0.509 + ], + "angle": 0, + "content": "[23] Z. Shao, P. Wang, Q. Zhu, R. Xu, J. Song, X. Bi, H. Zhang, M. Zhang, Y. Li, Y. Wu, et al. Deepseekmath: Pushing the limits of mathematical reasoning in open language models. arXiv preprint arXiv:2402.03300, 2024." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.117, + 0.519, + 0.881, + 0.552 + ], + "angle": 0, + "content": "[24] Q. Team. Qwen2.5: A party of foundation models, September 2024. URL https://qwen.lm.github.io/blog/qwen2.5/." + }, + { + "type": "ref_text", + "bbox": [ + 0.117, + 0.563, + 0.881, + 0.595 + ], + "angle": 0, + "content": "[25] Q. Team. Qwq-32b: Embracing the power of reinforcement learning, March 2025. URL https://qwenlm.github.io/blog/qwq-32b/." + }, + { + "type": "ref_text", + "bbox": [ + 0.117, + 0.605, + 0.881, + 0.637 + ], + "angle": 0, + "content": "[26] T. H. Trinh, Y. Wu, Q. V. Le, H. He, and T. Luong. Solving olympiad geometry without human demonstrations. Nature, 625(7995):476-482, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.117, + 0.647, + 0.883, + 0.696 + ], + "angle": 0, + "content": "[27] Z. Wan, Y. Li, Y. Song, H. Wang, L. Yang, M. Schmidt, J. Wang, W. Zhang, S. Hu, and Y. Wen. Rema: Learning to meta-think for llms with multi-agent reinforcement learning. arXiv preprint arXiv:2503.09501, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.117, + 0.706, + 0.881, + 0.74 + ], + "angle": 0, + "content": "[28] R. Wang, J. Zhang, Y. Jia, R. Pan, S. Diao, R. Pi, and T. Zhang. Theoremlama: Transforming general-purpose llms into lean4 experts. arXiv preprint arXiv:2407.03203, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.117, + 0.749, + 0.883, + 0.799 + ], + "angle": 0, + "content": "[29] R. Wang, R. Pan, Y. Li, J. Zhang, Y. Jia, S. Diao, R. Pi, J. Hu, and T. Zhang. Ma-lot: Multiagent lean-based long chain-of-thought reasoning enhances formal theorem proving. arXiv preprint arXiv:2503.03205, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.117, + 0.808, + 0.883, + 0.857 + ], + "angle": 0, + "content": "[30] Z. Wu, S. Huang, Z. Zhou, H. Ying, J. Wang, D. Lin, and K. Chen. Internl m2.5-Stepprover: Advancing automated theorem proving via expert iteration on large-scale lean problems. arXiv preprint arXiv:2410.15700, 2024." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.117, + 0.867, + 0.881, + 0.9 + ], + "angle": 0, + "content": "[31] Z. Wu, J. Wang, D. Lin, and K. Chen. Lean-github: Compiling github lean repositories for a versatile lean prover. arXiv preprint arXiv:2407.17227, 2024." + }, + { + "type": "list", + "bbox": [ + 0.115, + 0.101, + 0.885, + 0.9 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.489, + 0.924, + 0.51, + 0.936 + ], + "angle": 0, + "content": "14" + } + ], + [ + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.101, + 0.886, + 0.151 + ], + "angle": 0, + "content": "[32] H. Xin, D. Guo, Z. Shao, Z. Ren, Q. Zhu, B. Liu, C. Ruan, W. Li, and X. Liang. Deepseek-prover: Advancing theorem proving in llms through large-scale synthetic data. arXiv preprint arXiv:2405.14333, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.117, + 0.16, + 0.884, + 0.21 + ], + "angle": 0, + "content": "[33] H. Xin, Z. Ren, J. Song, Z. Shao, W. Zhao, H. Wang, B. Liu, L. Zhang, X. Lu, Q. Du, et al. Deepseek-prover-v1. 5: Harnessing proof assistant feedback for reinforcement learning and monte-carlo tree search. arXiv preprint arXiv:2408.08152, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.117, + 0.218, + 0.886, + 0.268 + ], + "angle": 0, + "content": "[34] R. Xin, C. Xi, J. Yang, F. Chen, H. Wu, X. Xiao, Y. Sun, S. Zheng, and K. Shen. Bfs-prover: Scalable best-first tree search for llm-based automatic theorem proving. arXiv preprint arXiv:2502.03438, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.117, + 0.277, + 0.886, + 0.328 + ], + "angle": 0, + "content": "[35] K. Yang, A. Swope, A. Gu, R. Chalamala, P. Song, S. Yu, S. Godil, R. J. Prenger, and A. Anandkumar. Leandojo: Theorem proving with retrieval-augmented language models. Advances in Neural Information Processing Systems (NeurIPS), 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.117, + 0.336, + 0.886, + 0.386 + ], + "angle": 0, + "content": "[36] H. Ying, Z. Wu, Y. 
Geng, J. Wang, D. Lin, and K. Chen. Lean workbook: A large-scale lean problem set formalized from natural language math problems. arXiv preprint arXiv:2406.03847, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.117, + 0.395, + 0.886, + 0.445 + ], + "angle": 0, + "content": "[37] W. Zeng, Y. Huang, Q. Liu, W. Liu, K. He, Z. Ma, and J. He. Simplerl-zoo: Investigating and taming zero reinforcement learning for open base models in the wild. arXiv preprint arXiv:2503.18892, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.117, + 0.455, + 0.886, + 0.489 + ], + "angle": 0, + "content": "[38] X. Zhao, W. Wu, J. Guan, and L. Kong. Promptcot: Synthesizing olympiad-level problems for mathematical reasoning in large language models. arXiv preprint arXiv:2503.02324, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.117, + 0.497, + 0.886, + 0.532 + ], + "angle": 0, + "content": "[39] K. Zheng, J. M. Han, and S. Polu. Minif2f: a cross-system benchmark for formal olympiad-level mathematics. arXiv preprint arXiv:2109.00110, 2021." + }, + { + "type": "list", + "bbox": [ + 0.115, + 0.101, + 0.886, + 0.532 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.489, + 0.923, + 0.511, + 0.936 + ], + "angle": 0, + "content": "15" + } + ], + [ + { + "type": "title", + "bbox": [ + 0.115, + 0.101, + 0.221, + 0.119 + ], + "angle": 0, + "content": "Appendix" + }, + { + "type": "title", + "bbox": [ + 0.115, + 0.131, + 0.338, + 0.147 + ], + "angle": 0, + "content": "A. 
Formalizer Details" + }, + { + "type": "text", + "bbox": [ + 0.113, + 0.162, + 0.884, + 0.194 + ], + "angle": 0, + "content": "We start with Qwen25-Coder-32B-Instruct (Hui et al., 2024) and use following instruct prompt to train the formalizer:" + }, + { + "type": "title", + "bbox": [ + 0.423, + 0.213, + 0.576, + 0.225 + ], + "angle": 0, + "content": "Formalizer Prompt" + }, + { + "type": "text", + "bbox": [ + 0.135, + 0.23, + 0.838, + 0.254 + ], + "angle": 0, + "content": "Please translate the mathematical statement {informal_statement} into a theorem statement in Lean 4 code." + }, + { + "type": "text", + "bbox": [ + 0.135, + 0.263, + 0.824, + 0.287 + ], + "angle": 0, + "content": "Please do not generate codes of proof or comment sentences (e.g., starting with '/-' or '-')." + }, + { + "type": "text", + "bbox": [ + 0.135, + 0.297, + 0.766, + 0.311 + ], + "angle": 0, + "content": "The Lean 4 codes are required to complete the 'statement' in the following text:" + }, + { + "type": "text", + "bbox": [ + 0.137, + 0.328, + 0.219, + 0.341 + ], + "angle": 0, + "content": "\" ' lean4" + }, + { + "type": "text", + "bbox": [ + 0.137, + 0.343, + 0.576, + 0.366 + ], + "angle": 0, + "content": "theorem lean_workbook 'statement' := by sorry" + }, + { + "type": "text", + "bbox": [ + 0.112, + 0.41, + 0.885, + 0.522 + ], + "angle": 0, + "content": "As shown in Table 3, the formalizer is targeted to translate the natural language statement to formal statement in Lean 4 codes. We take use of the 29.7K data released by Goedel-Prover (Lin et al., 2025), which provides pairs of informal statement and formal statement in each sample. We train the formalizer with a fixed learning rate \\(5 \\times 10^{-6}\\) for 2 epochs. We verify the Compiling Correctness (CC) Test, and Faithfulness and Completeness (FC) Test by following the prompts in Goedel-Prover. As shown in Table 4, our formalizer performs similarly to the Formalizer A (Lin et al., 2025)." 
+ }, + { + "type": "table", + "bbox": [ + 0.117, + 0.533, + 0.885, + 0.695 + ], + "angle": 0, + "content": "
Example 1Example 2
Informal StatementSolve for x in the given inequality: x2-2x-24<0Prove that ln(eπ) is equal to π.
Formalizer Outputtheorem lean_workbook (x : R): x^2 - 2*x - 24 < 0 ↔ x ∈ Set.Ioo (-4) 6 := by sorrytheorem lean_workbook : Real.log (Real.exp π) = π := by sorry
" + }, + { + "type": "table_caption", + "bbox": [ + 0.114, + 0.705, + 0.688, + 0.722 + ], + "angle": 0, + "content": "Table 3 | Examples of formalizer inputs and outputs for two examples." + }, + { + "type": "table", + "bbox": [ + 0.253, + 0.74, + 0.747, + 0.822 + ], + "angle": 0, + "content": "
ModelCC Test (%)FC Test (%)
Formalizer A (Lin et al., 2025)76.748.1
Formalizer B (Lin et al., 2025)88.580.4
Ours Formalizer77.649.0
" + }, + { + "type": "table_caption", + "bbox": [ + 0.113, + 0.832, + 0.884, + 0.865 + ], + "angle": 0, + "content": "Table 4 | Quality assessment of the formalized statement. \"CC\" refers to Compiling Correctness (CC) Test and \"FC\" refers to Faithfulness and Completeness (FC) Test." + }, + { + "type": "page_number", + "bbox": [ + 0.489, + 0.924, + 0.51, + 0.936 + ], + "angle": 0, + "content": "16" + } + ], + [ + { + "type": "title", + "bbox": [ + 0.116, + 0.1, + 0.43, + 0.119 + ], + "angle": 0, + "content": "B. Cognitive Behaviors Design" + }, + { + "type": "title", + "bbox": [ + 0.116, + 0.133, + 0.382, + 0.15 + ], + "angle": 0, + "content": "B.1. Lean Completion Example" + }, + { + "type": "code_caption", + "bbox": [ + 0.116, + 0.16, + 0.166, + 0.176 + ], + "angle": 0, + "content": "Input:" + }, + { + "type": "code", + "bbox": [ + 0.115, + 0.181, + 0.884, + 0.895 + ], + "angle": 0, + "content": "Complete the following Lean 4 code with explanatory comments preceding each line of code: \n' \\(\\mathit{lean4}\\) \nimport Mathlib \nimport Aesop \nset_option maxHeartbeats O \nopen BigOperators Real Nat Topology Rat \n/\\~ Given \\(\\sin (\\frac{\\pi}{4} -x) = \\frac{3}{5}\\) , find the value of sin2x. Show that it is \\(\\backslash\\) frac{7}{25}. - / theorem sin_2x_value (x : R) (h : sin (Real.pi / 4 - x) = 3 / 5) : sin (2 * x) \\(= 7 / 25\\coloneqq\\) by \n/\\~ Given \\(\\backslash (\\backslash \\sin \\backslash \\text{left} (\\backslash \\text{frac} (\\backslash \\text{pi})\\{4\\} -\\text{x}\\backslash \\text{right}) = \\backslash \\text{frac} (3)\\{5\\} \\backslash)\\) , we need to find the value of \\(\\backslash (\\backslash \\sin 2x\\backslash)\\) . We start by using the trigonometric identity for the sine of a difference and the double-angle formula for sine. 
Specifically, we use the identity \\(\\backslash (\\backslash \\sin (a - b) = \\backslash \\sin a\\backslash \\cos b - \\backslash \\cos a\\backslash \\sin b\\backslash)\\) and the double-angle formula \\(\\backslash (\\backslash \\sin 2x = 2\\backslash \\sin x\\backslash \\cos x\\backslash)\\) . 1. Substitute \\(\\backslash (\\backslash \\sin \\backslash \\text{left} (\\backslash \\text{frac} (\\backslash \\text{pi})\\{4\\} -\\text{x}\\backslash \\text{right}) = \\backslash \\text{frac} (3)\\{5\\} \\backslash)\\) into the identity for the sine of a difference: [ \\\\sin \\(\\backslash\\) left(\\frac{\\frac{\\pi}{4}}{4}\\)-x\\right) \\(=\\) \\\\sin \\(\\backslash\\) left(\\frac{\\frac{\\pi}{4}}{4}\\)-x\\right) \\(=\\) \\\\sin \\(\\backslash\\) left(a-b) \\(=\\) \\\\sin a\\cos b- cos a\\sin b) and the double-angle formula \\(\\backslash (\\backslash \\sin 2x = 2\\backslash \\sin x\\backslash \\cos x\\backslash)\\) \n] \n2. Calculate \\(\\backslash (\\backslash \\sin \\backslash \\text{left} (\\backslash \\text{frac} (\\backslash \\text{pi})\\{4\\} -\\text{x}\\backslash \\text{right}) = \\backslash \\text{frac} (3)\\{5\\} \\backslash)\\) into \\\\left(\\frac{\\frac{\\pi}{4}}{4}\\)-x\\right) and \\\\( (\\cos \\\\left(\\frac{\\frac{\\pi}{4}}{4}\\)-x\\right)\\\\left(\\frac{\\frac{\\pi}{4}}{4}\\)-x\\right)\\\\left(\\frac{\\frac{\\pi}{4}}{4}\\)-x\\right)\\\\left(\\frac{\\frac{\\pi}{4}}{4}\\)-x\\right)\\\\left(\\frac{\\frac{\\pi}{4}}{4}\\)-x\\right)\\\\left(\\frac{\\frac{\\pi}{4}}{4}\\)-x\\right)\\\\left(\\frac{\\pi}{4}\\)-x\\right)\\\\left(\\frac{\\pi}{4}\\)-x\\right)\\\\left(\\frac{\\pi}{4}\\)-x\\right)\\\\left(\\frac{\\pi}{4}\\)-x\\right)\\\\left(\\frac{\\pi}{4}\\)-x\\right)\\\\left(\\frac{\\pi}{4}\\)-x\\right)\\\\left(\\frac{\\pi}{4}\\)-x\\right)\\\\left(\\frac{\\pi}{4} -x\\right)\\\\left(\\frac{\\pi}{4} -x\\right)\\\\left(\\frac{\\pi}{4} -x\\right)\\\\left(\\frac{\\pi}{4} -x\\right)\\\\left(\\frac{\\pi}{4} -x\\right)\\\\left(\\frac{\\pi}{4} 
-x\\right)\\\\left(\\frac{\\pi}{4} -x\\right)\\\\left(\\frac{\\pi}{4} -x" + }, + { + "type": "page_number", + "bbox": [ + 0.489, + 0.923, + 0.51, + 0.936 + ], + "angle": 0, + "content": "17" + } + ], + [ + { + "type": "code", + "bbox": [ + 0.116, + 0.101, + 0.885, + 0.318 + ], + "angle": 0, + "content": "\\sin 2x = 2 \\sin \\left(\\text {left} (\\text {x} \\text {right}) \\cos \\left(\\text {left} (\\text {x} \\text {right}) \\right.\\right) \n\\] \n7. Substitute \\(\\backslash (\\backslash \\cos \\backslash \\text {left} (\\text {x} \\backslash \\text {right}) - \\backslash \\sin \\backslash \\text {left} (\\text {x} \\backslash \\text {right}) = \\backslash \\operatorname{frac}\\{3\\) \n\\(\\backslash \\operatorname{sqrt}\\{2\\} \\} \\{5\\} \\backslash)\\) into the equation: \n\\[\n\\begin{array}{l}\n\\backslash [ \\\\\n\\sin 2x = 2 \\backslash \\sin \\backslash \\text {left} (\\text {x} \\backslash \\text {right}) \\cos \\backslash \\text {left} (\\text {x} \\backslash \\text {right}) = 2 \\backslash \\operatorname{left} (\\backslash \\operatorname{frac}\\{3\n\\} \\backslash \\operatorname{sqrt}\\{2\\}) \\{5\\} \\backslash \\text {right}) \\backslash \\operatorname{left} (\\backslash \\operatorname{frac}\\{\\backslash \\operatorname{sqrt}\\{2\\}\\} \\{2\\} \\backslash \\text {right}) = 2 \\backslash \\operatorname{cdot} \\backslash \\operatorname{frac}\\{3\n\\} \\backslash \\operatorname{sqrt}\\{2\\}) \\{5\\} \\backslash \\operatorname{cdot} \\backslash \\operatorname{frac}\\{\\backslash \\operatorname{sqrt}\\{2\\}\\} \\{2\\} = \\backslash \\operatorname{frac}\\{3 \\backslash \\operatorname{sqrt}\\{2\\}\\} \\{5\\} \\backslash \\operatorname{cdot}\n\\end{array}\n\\] \n\\(\\backslash\\) \n8. 
Simplify the final expression: \n\\[\n\\begin{array}{l}\n\\backslash [ \\\\\n\\sin 2x = \\backslash \\operatorname{frac}\\{7\\} \\{25\\}\n\\end{array}\n\\]" + }, + { + "type": "code_caption", + "bbox": [ + 0.115, + 0.335, + 0.221, + 0.353 + ], + "angle": 0, + "content": "Wrong Proof:" + }, + { + "type": "code", + "bbox": [ + 0.116, + 0.357, + 0.885, + 0.503 + ], + "angle": 0, + "content": "-- Use the trigonometric identity for the sine of a difference and the double-angle formula for sine. \nsimp [Real.sin_sub, Real.sin_two.mul, Real.cos_two.mul, Real.sin(pi_div_four, Real.cos(pi_div_four] at h \\(\\vdash\\) \n-- Simplify the expression using field operations. \nfield_simp at h \\(\\vdash\\) \n-- Normalize the expression by expanding and simplifying it. \nring_nf at h \\(\\vdash\\) \n-- Use non-linear arithmetic to verify the final result. \nnlinarith [Real.sin_sq_add_cos_sq x, Real.sin_le_one x, Real.cos_le_one x]" + }, + { + "type": "code_caption", + "bbox": [ + 0.115, + 0.521, + 0.286, + 0.538 + ], + "angle": 0, + "content": "Tactic Error Messages:" + }, + { + "type": "code", + "bbox": [ + 0.115, + 0.542, + 0.5, + 0.646 + ], + "angle": 0, + "content": "linarith failed to find a contradiction \ncase h1.h \nx: \\(\\mathbb{R}\\) \nh: \\(\\sqrt{2} * x.\\cos * 5 - \\sqrt{2} * x.\\sin * 5 = 6\\) \nat: x.cos * x.sin * 50 < 7 \n\\(\\vdash\\) False \nfailed" + }, + { + "type": "code_caption", + "bbox": [ + 0.115, + 0.665, + 0.279, + 0.682 + ], + "angle": 0, + "content": "Searched Valid Proof:" + }, + { + "type": "code", + "bbox": [ + 0.116, + 0.687, + 0.885, + 0.845 + ], + "angle": 0, + "content": "-- Use the trigonometric identity for the sine of a difference and the double-angle formula for sine. \nsimp [Real.sin_sub, Real.sin_two.mul, Real.cos_two.mul, Real.sin(pi_div_four, Real.cos(pi_div_four] at h \\(\\vdash\\) \n-- Simplify the expression using field operations. \nfield_simp at h \\(\\vdash\\) \n-- Normalize the expression by expanding and simplifying it. 
\nring_nf at h \\(\\vdash\\) \n-- Use the Pythagorean identity to simplify further. \nnlinarith [Real.sin_sq_add_cos_sq x, Real.sqrt_nonneg 2, sq_sqrt (show \\(0 \\leq\\) 2 by norm_num)]\"" + }, + { + "type": "code_caption", + "bbox": [ + 0.115, + 0.864, + 0.216, + 0.879 + ], + "angle": 0, + "content": "Claude CoTs:" + }, + { + "type": "code", + "bbox": [ + 0.116, + 0.888, + 0.187, + 0.901 + ], + "angle": 0, + "content": "" + }, + { + "type": "page_number", + "bbox": [ + 0.489, + 0.923, + 0.51, + 0.936 + ], + "angle": 0, + "content": "18" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.115, + 0.102, + 0.867, + 0.146 + ], + "angle": 0, + "content": "let's verify this proof to make sure it's working correctly. I need to check each step of the proof to ensure it's valid and identify any potential issues." + }, + { + "type": "text", + "bbox": [ + 0.115, + 0.159, + 0.77, + 0.19 + ], + "angle": 0, + "content": "First, let's examine what we're trying to prove. It appears to be a trigonometric identity involving sine and cosine functions." + }, + { + "type": "text", + "bbox": [ + 0.115, + 0.202, + 0.331, + 0.224 + ], + "angle": 0, + "content": "The proof begins with:" + }, + { + "type": "text", + "bbox": [ + 0.115, + 0.23, + 0.862, + 0.268 + ], + "angle": 0, + "content": "simp [Real.sin_sub, Real.sin_two.mul, Real.cos_two.mul, Real.sin(pi_div_four, Real.cos(pi_div_four] at h \\(\\vdash\\)" + }, + { + "type": "text", + "bbox": [ + 0.115, + 0.273, + 0.865, + 0.303 + ], + "angle": 0, + "content": "This step seems reasonable - we're using standard trigonometric identities to simplify both the hypothesis \\( h \\) and the goal." 
+ }, + { + "type": "text", + "bbox": [ + 0.115, + 0.316, + 0.254, + 0.338 + ], + "angle": 0, + "content": "Next, we have:" + }, + { + "type": "text", + "bbox": [ + 0.115, + 0.344, + 0.281, + 0.367 + ], + "angle": 0, + "content": "field_simp at h" + }, + { + "type": "text", + "bbox": [ + 0.115, + 0.372, + 0.806, + 0.402 + ], + "angle": 0, + "content": "This tactic simplifies expressions involving field operations, which is appropriate for real-valued trigonometric expressions." + }, + { + "type": "text", + "bbox": [ + 0.115, + 0.415, + 0.166, + 0.438 + ], + "angle": 0, + "content": "Then:" + }, + { + "type": "text", + "bbox": [ + 0.115, + 0.444, + 0.254, + 0.466 + ], + "angle": 0, + "content": "ring_nf at h" + }, + { + "type": "text", + "bbox": [ + 0.115, + 0.472, + 0.836, + 0.501 + ], + "angle": 0, + "content": "This normalizes ring expressions, which should help simplify our algebraic expressions further." + }, + { + "type": "text", + "bbox": [ + 0.115, + 0.515, + 0.281, + 0.537 + ], + "angle": 0, + "content": "Finally, we have:" + }, + { + "type": "text", + "bbox": [ + 0.115, + 0.542, + 0.833, + 0.566 + ], + "angle": 0, + "content": "nlinarith [Real.sin_sq_add_cos_sq x, Real.sin_le_one x, Real.cos_le_one x]" + }, + { + "type": "text", + "bbox": [ + 0.115, + 0.585, + 0.87, + 0.628 + ], + "angle": 0, + "content": "This is where I need to be careful. The 'nlinarith' tactic is trying to use non-linear arithmetic with the Pythagorean identity \\(\\sin^2 (x) + \\cos^2 (x) = 1'\\) and the bounds on sine and cosine functions." + }, + { + "type": "text", + "bbox": [ + 0.115, + 0.642, + 0.841, + 0.715 + ], + "angle": 0, + "content": "The issue might be that we're using bounds ('Real.sin_le_one x, Real.cos_le_one x') rather than equalities. When dealing with trigonometric identities, we often need exact relationships rather than just bounds. The Pythagorean identity is an equality, but we're also including inequalities in our reasoning." 
+ }, + { + "type": "text", + "bbox": [ + 0.115, + 0.727, + 0.867, + 0.786 + ], + "angle": 0, + "content": "A better approach would be to focus on using the Pythagorean identity directly, possibly with additional facts about square roots. The `sqrt(2)` in our expressions suggests we might need properties about square roots, such as `sq_sqrt` to handle expressions like `(\\sqrt{2})^{\\sim}`." + }, + { + "type": "text", + "bbox": [ + 0.115, + 0.798, + 0.88, + 0.843 + ], + "angle": 0, + "content": "Additionally, we should ensure that we're providing all the necessary facts to the arithmetic solver. For instance, we might need to explicitly include that '\\sqrt{2} \\geq 0'\\), which could be done with 'Real.sqrt_nonneg \\(2'\\)." + }, + { + "type": "page_number", + "bbox": [ + 0.489, + 0.923, + 0.51, + 0.936 + ], + "angle": 0, + "content": "19" + } + ], + [ + { + "type": "title", + "bbox": [ + 0.116, + 0.102, + 0.321, + 0.119 + ], + "angle": 0, + "content": "B.2. Rewriting Example" + }, + { + "type": "code_caption", + "bbox": [ + 0.116, + 0.129, + 0.166, + 0.144 + ], + "angle": 0, + "content": "Input:" + }, + { + "type": "code", + "bbox": [ + 0.115, + 0.149, + 0.884, + 0.369 + ], + "angle": 0, + "content": "Complete the following Lean 4 code with explanatory comments preceding each line of code: \n```\n```\n```lean4\nimport Mathlib\nimport Aesop\nset_option maxHeartbeats 0\nopen BigOperators Real Nat Topology Rat\n-- Prove that if \\( n \\geq 2 \\), then \\( n \\) distinct integers can be given such that the sum of any two of them is divisible by their difference.-/\ntheorem lean_workbook : ∀ n : N, 2 ≤ n → ∃ l : List Z, l.length = n ∧ ∀ x y, x ∈ l ∧ y ∈ l ∧ x ≠ y → (x + y) % (x - y) = 0 := by" + }, + { + "type": "code_caption", + "bbox": [ + 0.116, + 0.383, + 0.221, + 0.4 + ], + "angle": 0, + "content": "Wrong Proof:" + }, + { + "type": "code", + "bbox": [ + 0.115, + 0.404, + 0.884, + 0.709 + ], + "angle": 0, + "content": "- We need to prove that for any natural number 
\\((n \\backslash \\text{geq} 2)\\), there exists a list of \\((n \\backslash)\\) distinct integers such that the sum of any two integers in the list is divisible by their difference. \n1. **Base Case**: When \\((n = 2)\\), we can choose the list \\(([0, 1])\\). For any \\((x \\backslash)\\) and \\((y \\backslash)\\) in this list, if \\((x \\backslash \\text{neq} y)\\), then \\((x + y) \\mod (x - y) = 0\\). \n2. **Inductive Step**: Assume the statement holds for some \\((n \\backslash \\text{geq} 2)\\). We need to show it holds for \\((n + 1)\\). By the inductive hypothesis, there exists a list of \\((n \\backslash)\\) distinct integers \\((1 \\backslash)\\) such that the sum of any two elements in \\((1 \\backslash)\\) is divisible by their difference. We can add one more integer to this list and show that the new list still satisfies the condition. \n- / \nintro n hn \n-- Use a base case for \\(n = 2\\) \nuse [0, 1] \n-- Verify that the base case satisfies the conditions \nsimp \n-- Use Aesop to handle the general case \n<;> aesop" + }, + { + "type": "code_caption", + "bbox": [ + 0.116, + 0.723, + 0.348, + 0.74 + ], + "angle": 0, + "content": "Revised Comments and Proofs:" + }, + { + "type": "code", + "bbox": [ + 0.115, + 0.744, + 0.884, + 0.905 + ], + "angle": 0, + "content": "/\\-We need to prove that for any natural number \\(\\backslash\\) (n\\geq 2 \\), there exists a list of \\(\\backslash\\) (n \\) distinct integers such that the sum of any two integers in the list is divisible by their difference. \n1. \\*\\*Base Case**: When \\(\\backslash\\) ( \\(n = 2\\backslash\\) ), we can choose the list \\(\\backslash ([0,2]\\backslash)\\). For any \\(\\backslash\\) (x \\) and \\(\\backslash\\) y \\) in this list, if \\(\\backslash\\) (x\\neq y \\), then \\(\\backslash\\) (x + y) mod (x - y) = 0 \\). For example, \\(\\backslash\\) (0 + 2) mod (0 - 2) = 2 \\mod -2 = 0 \\). \n2. 
\\*\\*Inductive Step**: Assume the statement holds for some \\(\\backslash\\) (n\\geq 2 \\). We need to show it holds for \\(\\backslash\\) (n + 1 \\). By the inductive hypothesis, there" + }, + { + "type": "page_number", + "bbox": [ + 0.488, + 0.924, + 0.51, + 0.936 + ], + "angle": 0, + "content": "20" + } + ], + [ + { + "type": "code", + "bbox": [ + 0.116, + 0.101, + 0.885, + 0.407 + ], + "angle": 0, + "content": "exists a list of \\(\\backslash\\) (n \\(\\backslash\\)) distinct integers \\(\\backslash\\) (\\{a_1, a_2, \\ldots, a_n\\} \\}) such that the sum of any two elements in this list is divisible by their difference. We can add one more integer to this list to form a new list of \\(\\backslash\\) (n + 1 \\(\\backslash\\)) integers. A good choice for the new integer is \\(\\backslash\\) (a_n+1 \\() =\\) a_n + (n+1)! \\)). This ensures that for any \\(\\backslash\\) (i \\(\\backslash\\)) and \\(\\backslash\\) (j \\(\\backslash\\)) in the range \\(\\backslash\\) (1 \\(\\backslash\\) leq i, j \\(\\backslash\\) leq n+1 \\(\\backslash\\)) with \\(\\backslash\\) (i \\(\\backslash\\) neq j \\(\\backslash\\)), the sum \\(\\backslash\\) (a_i + a_j \\(\\backslash\\)) is divisible by their difference \\(\\backslash\\) (a_i - a_j \\(\\backslash\\)). This is because \\(\\backslash\\) (a_i + a_j) - (a_i - a_j) = 2a_j \\), and since \\(\\backslash\\) (a_j \\(\\backslash\\)) is an integer, \\(\\backslash\\) (2a_j \\(\\backslash\\)) is always divisible by \\(\\backslash\\) (a_i - a_j \\)). Thus, the list \\(\\backslash\\) (\\{a_1, a_2, \\ldots, a_ldots, a_n, a_{\\{n+1\\}}\\} \\}) satisfies the condition, completing the inductive step. 
\n- \n- Introduce the natural number \\(n\\) and the assumption that \\(n \\geq 2\\) \nintro n hn \n- Use a construction to construct the list of integers \nuse List.replicate n 0 \n- Simplify the conditions given the assumption \\(n \\geq 2\\) \nsimp [hn, List.length, List.mem_replicate, List.mem_range, List.mem_filter] \n- Use Aesop to complete the proof \n<;> aesop\"" + }, + { + "type": "title", + "bbox": [ + 0.114, + 0.421, + 0.218, + 0.435 + ], + "angle": 0, + "content": "Claude CoTs:" + }, + { + "type": "text", + "bbox": [ + 0.116, + 0.444, + 0.777, + 0.473 + ], + "angle": 0, + "content": "Let me verify my proof for this problem about distinct integers with divisibility properties..." + }, + { + "type": "text", + "bbox": [ + 0.116, + 0.487, + 0.867, + 0.528 + ], + "angle": 0, + "content": "Looking at my current solution, I'm trying to prove that for any \\( n \\geq 2 \\), there exist \\( n \\) distinct integers where the sum of any two is divisible by their difference." + }, + { + "type": "text", + "bbox": [ + 0.116, + 0.543, + 0.856, + 0.571 + ], + "angle": 0, + "content": "Wait, I see an issue here. I've used '[0, 1]' as my example list, but let me check if this actually satisfies the condition:" + }, + { + "type": "text", + "bbox": [ + 0.116, + 0.572, + 0.854, + 0.587 + ], + "angle": 0, + "content": "- For \\( x = 0 \\) and \\( y = 1 \\): \\( (x + y) \\% (x - y) = (0 + 1) \\% (0 - 1) = 1 \\% (-1) = 0 \\)?" + }, + { + "type": "text", + "bbox": [ + 0.116, + 0.6, + 0.841, + 0.642 + ], + "angle": 0, + "content": "Let me verify this calculation. We have \\( 1\\% \\) (-1) which should be the remainder when 1 is divided by -1. Since -1 divides 1 as \\( -1 \\times (-1) = 1 \\), the remainder is 0. So this seems correct." + }, + { + "type": "text", + "bbox": [ + 0.116, + 0.657, + 0.854, + 0.693 + ], + "angle": 0, + "content": "But wait, there's another issue. I've only constructed a list for \\( n = 2 \\), but my proof needs to work for any \\( n \\geq 2 \\). 
The line:" + }, + { + "type": "text", + "bbox": [ + 0.116, + 0.7, + 0.214, + 0.722 + ], + "angle": 0, + "content": "use [0, 1]" + }, + { + "type": "text", + "bbox": [ + 0.116, + 0.728, + 0.844, + 0.756 + ], + "angle": 0, + "content": "only provides a list of length 2, but I need to handle cases where \\( n > 2 \\) as well." + }, + { + "type": "text", + "bbox": [ + 0.116, + 0.771, + 0.837, + 0.828 + ], + "angle": 0, + "content": "I need to generalize my construction. Perhaps I should create a list of length \\( n \\) instead of just assuming the base case will handle everything. The 'simp' and 'aesop' tactics might not be able to automatically construct this list for arbitrary \\( n \\)." + }, + { + "type": "text", + "bbox": [ + 0.116, + 0.842, + 0.868, + 0.885 + ], + "angle": 0, + "content": "Let me reconsider this approach... What if I use a more general construction? I could use 'List.replicate n 0' to create a list of \\( \\mathbf{n} \\) zeros. But then all elements would be the same, and the problem requires distinct integers." + }, + { + "type": "page_number", + "bbox": [ + 0.488, + 0.923, + 0.508, + 0.936 + ], + "angle": 0, + "content": "21" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.115, + 0.102, + 0.834, + 0.147 + ], + "angle": 0, + "content": "Actually, I should think more carefully about what list would satisfy the divisibility condition. If I have integers \\( a \\) and \\( b \\) where \\( a \\neq b \\), then I need \\( (a + b) \\% (a - b) = 0 \\)." + }, + { + "type": "text", + "bbox": [ + 0.115, + 0.159, + 0.85, + 0.203 + ], + "angle": 0, + "content": "For the general case, I might need to use a more sophisticated construction like arithmetic sequences or something based on factorials to ensure the divisibility property holds." 
+ }, + { + "type": "text", + "bbox": [ + 0.115, + 0.216, + 0.868, + 0.275 + ], + "angle": 0, + "content": "The issue with my current proof is that I'm not providing a general construction for \\( n \\) elements - I'm only handling \\( n = 2 \\). And even if 'aesop' could somehow extend this, I haven't given it enough guidance on how to construct the larger list." + }, + { + "type": "text", + "bbox": [ + 0.115, + 0.287, + 0.331, + 0.301 + ], + "angle": 0, + "content": "To fix this, I should:" + }, + { + "type": "text", + "bbox": [ + 0.115, + 0.302, + 0.682, + 0.316 + ], + "angle": 0, + "content": "1. Either provide an explicit construction for arbitrary \\( n \\)" + }, + { + "type": "text", + "bbox": [ + 0.114, + 0.316, + 0.834, + 0.331 + ], + "angle": 0, + "content": "2. Or use induction properly, showing how to extend from \\( n \\) to \\( n + 1 \\) elements" + }, + { + "type": "list", + "bbox": [ + 0.114, + 0.302, + 0.834, + 0.331 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.115, + 0.343, + 0.859, + 0.43 + ], + "angle": 0, + "content": "I think I need to reconsider what list would work for this problem. Maybe something like powers of 2, or a list where consecutive elements have a specific relationship? The factorial approach mentioned in my comment might be promising - adding elements like \\( \\mathbf{a}_1 \\), \\( \\mathbf{a}_2 \\), ..., \\( \\mathbf{a}_n \\) where each new element is constructed to maintain the divisibility relationship with all previous elements." + }, + { + "type": "text", + "bbox": [ + 0.115, + 0.443, + 0.838, + 0.502 + ], + "angle": 0, + "content": "I should try constructing a list like \\([0, 1!, 2!, \\ldots, (n-1)!!]\\) or something similar and verify that it satisfies the divisibility condition. This would give me distinct integers with potentially useful divisibility properties." + }, + { + "type": "title", + "bbox": [ + 0.114, + 0.537, + 0.259, + 0.554 + ], + "angle": 0, + "content": "C. 
RL Details" + }, + { + "type": "text", + "bbox": [ + 0.113, + 0.569, + 0.884, + 0.601 + ], + "angle": 0, + "content": "Figure 6 shows the overall distribution of valid proofs in pass@32 testing. We collect the distribution base on our Leanabell-Prover-GD-SFT model." + }, + { + "type": "page_number", + "bbox": [ + 0.488, + 0.923, + 0.51, + 0.936 + ], + "angle": 0, + "content": "22" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.123, + 0.328, + 0.874, + 0.608 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.117, + 0.624, + 0.882, + 0.673 + ], + "angle": 0, + "content": "Figure 6 | Distribution of valid proofs in pass@32. We calculate this distribution based on the Leanabell-Prover-GD-SFT model. To build this set, we remain the formal statements who has at least sampled proofs are valid by Lean 4 compiler." + }, + { + "type": "page_number", + "bbox": [ + 0.489, + 0.924, + 0.509, + 0.935 + ], + "angle": 0, + "content": "23" + } + ] +] \ No newline at end of file diff --git a/data/2025/2504_06xxx/2504.06122/1f4b320c-f80c-4d99-b14d-3a49e20634f9_origin.pdf b/data/2025/2504_06xxx/2504.06122/1f4b320c-f80c-4d99-b14d-3a49e20634f9_origin.pdf new file mode 100644 index 0000000000000000000000000000000000000000..a0b031681bc93496b2ff2fe60aea5b30b073177b --- /dev/null +++ b/data/2025/2504_06xxx/2504.06122/1f4b320c-f80c-4d99-b14d-3a49e20634f9_origin.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1818e9ca7c806aef4dece71a11d806f3fe3f7d4b5478242b20784dc812f5fc33 +size 449995 diff --git a/data/2025/2504_06xxx/2504.06122/full.md b/data/2025/2504_06xxx/2504.06122/full.md new file mode 100644 index 0000000000000000000000000000000000000000..5aedd7cb02e0189559d481c363e701742fb474e4 --- /dev/null +++ b/data/2025/2504_06xxx/2504.06122/full.md @@ -0,0 +1,515 @@ +# Leanabell-Prover: Posttraining Scaling in Formal Reasoning + +Jingyuan Zhang, Qi Wang, Xingguang Ji, Yahui Liu, Yang Yue, Fuzheng Zhang, Di Zhang, Guorui Zhou, 
Kun Gai + +Kuaishou Technology + +# Abstract + +Recent advances in automated theorem proving (ATP) through LLMs have highlighted the potential of formal reasoning with Lean 4 codes. However, ATP has not yet been revolutionized by the recent posttraining scaling as demonstrated by Open AI O1/O3 and Deepseek R1. In this work, we investigate the entire posttraining of ATP, aiming to align it with breakthroughs in reasoning models in natural languages. To begin, we continual train current ATP models with a hybrid dataset, which consists of numerous statement-proof pairs, and additional data aimed at incorporating cognitive behaviors that emulate human reasoning and hypothesis refinement. Next, we explore reinforcement learning with the use of outcome reward returned by Lean 4 compiler. Through our designed continual training and reinforcement learning processes, we have successfully improved existing formal provers, including both DeepSeek-Prover-v1.5 and Goedel-Prover, achieving state-of-the-art performance in the field of whole-proof generation. For example, we achieve a $59.8\%$ pass rate (pass@32) on MiniF2F. This is an on-going project and we will progressively update our findings, release our data and training details. + +![](images/d54d3dc99f0f5608177788c79a253bd2e0a99094c8f633e30787e913f16ecf88.jpg) +Figure 1 | Benchmark performance on MiniF2F-test (Zheng et al., 2021). Our method boosts both the two baseline models after employing RL training. Goedel-Prover-RL is our implementation. Our framework surpasses DeepSeek-Prover-v1.5-RL and Goedel-Prover-SFT $6.6\%$ and $2.2\%$ , respectively. + +# 1. Introduction + +Recent large language models (LLMs), such as Open AI O1/O3 and Deepseek R1, which are enhanced by posttraining scaling, emerge with numerous powerful and intriguing reasoning behaviors (Guo et al., 2025; Anthropic, 2025; Team, 2025). Such LLMs have shown impressive performance in solving math problems with natural language. 
However, the long chain-of-thoughts (CoTs) and final answers in natural language (NL) are substantially challenging for peer review (Wang et al., 2024), especially for theorem proving. Meanwhile, the key advantage of formal languages lies in their verifiability—each reasoning step can be validated by formal theorem verifiers, e.g., Lean (De Moura et al., 2015; Moura and Ullrich, 2021) and Isabelle (Paulson, 1994). As a promising direction, automated theorem proving (ATP) with formal languages (FL) has attracted booming attention from the community of large language models (LLMs).
+
+Contrary to solving math problems with natural language, generating proofs using Lean 4 codes (or other formal languages) is more challenging. For example, DeepSeek-Prover v1.5-RL (Xin et al., 2024) achieves only $50.0\%$ (pass@32) on the Olympiad-level mathematics benchmark MiniF2F (Zheng et al., 2021). However, DeepSeek-R1 (Guo et al., 2025) can achieve $100\%$ on the same math problems, but in natural language. Goedel-Prover (Lin et al., 2025) and STP (Dong and Ma, 2025) show that using an extensive synthetic dataset of formal statements and expert iteration (Polu et al., 2022) can boost the whole-proof prover. Meanwhile, some methods (Yang et al., 2023; Wu et al., 2024; Xin et al., 2025) scale up the search budget (e.g., more than 2 million in BFS-Prover (Xin et al., 2025)) for step-wise tactic generation, which seems extremely computational.
+
+Although RL strategies have already proven their effectiveness in natural language for math problem solving, the performance in formal reasoning has been rather ordinary so far. We find that only DeepSeek-Prover releases its RL version, and DeepSeek-Prover-v1.5-RL improves only marginally ($1.8\%$) over its supervised fine-tuned model. Compared to the success of reinforcement learning (RL) in natural language reasoning, the potential for improvement in formal language reasoning may still be vast. 
However, replicating the current successful RL training approaches, which primarily focus on the Qwen2.5 model series, is not straightforward.
+
+To verify the posttraining scaling in ATP, we begin with the standard whole-proof generation models DeepSeek-Prover-v1.5-SFT and Goedel-Prover-SFT. There are three training stages in our optimization framework. We first collect public and synthetic data to continue training. We also utilize automatic synthetic CoT data to embed the self-reflection capabilities, such as backtracking (abandoning failing approaches) and verification (systematic error-checking), to the fine-tuned model. Next, we employ the GRPO algorithm (Shao et al., 2024) to perform reinforcement learning from proof assistant feedback (RLPAF) on the supervised fine-tuned model. Similar to DeepSeek-Prover-v1.5-RL, the verification results from the Lean compiler serve as reward supervision. After analyzing the validation results on benchmarks, we find our posttraining strategies can effectively boost the overall performance on MiniF2F (Zheng et al., 2021) benchmark.
+
+In summary, here are our main contributions:
+
+- We continually train current ATP models with more high-quality statement-proof data pairs. More importantly, we design synthetic data to enhance the models' self-reflection capabilities, enabling us to pilot cognitive behaviors in our models before applying the RL algorithm.
+- We investigate the RL training to boost the ATP prover that generates whole mathematical proofs in Lean 4 codes. During training, we employ the Lean 4 verifier to serve as a reward
+
+supervision.
+
+- The proposed Leanabell-Prover achieves state-of-the-art performance through our meticulously designed strategy, $59.8\%$ (pass@32) on MiniF2F-test.
+- Currently, we collect around 1.52M formal statements, and 0.22M formal statements with detailed informal CoTs and verified proofs. All intermediate models and training data are released to the community1.
+
+# 2. 
Related Work + +**Lean4 Theorem Proving using LLMs.** With the rapid progress of LLMs, research has explored applying LLMs in FL reasoning to automate theorem proving. Prior research can be briefly classified into two strategies, namely proof-step generation and whole-proof generation. + +Proof-step generation methods train an LLM agent to iteratively generate proof steps by predicting the next tactic based on the current proof state (Polu and Sutskever, 2020; Polu et al., 2022; Lample et al., 2022; Azerbayev et al., 2023; Yang et al., 2023; Lin et al., 2024; DeepMind, 2024; Trinh et al., 2024; Wu et al., 2024; Xin et al., 2024; Li et al., 2024; Xin et al., 2025). These methods apply FL executor to verify after each step of generation and is able to discover some non-trivial proofs. For example, LeanDojo (Yang et al., 2023) first establishes relationship models between various tactic states within proofs. It then retrieves relevant premises from the mathematical library based on the current output state (as collected from a Lean verifier) and inputs these premises into an encoder-decoder model to generate the subsequent tactic. Employing Monte-Carlo tree search (MCTS) (Coulom, 2006) is another common solution in this field. However, as the complexity of the proof increases, tree search methods become computationally expensive and lack high-level NL planning to control the overall structure of the proof (Wang et al., 2025). + +Whole-proof generation methods treat theorem proving as a kind of code generation problem, where LLMs generate the entire proof in a single attempt by using supervised training or prompt engineering (Xin et al., 2024; Lin et al., 2025; Dong and Ma, 2025; Wang et al., 2025). This approach leverages the NL reasoning and high-level planning capabilities of LLMs with predictable computation costs, but lacks intermediate feedback from FL executors. 
Thus, the core challenge for improving whole-proof generation is that there are not sufficient Lean 4 codes to eliminate the gaps between NL and FL modalities. However, generating such data requires high levels of expertise, making it difficult to scale. As a result, the generated proofs often lack post-hoc analysis of errors and tend to perform badly on tedious questions that require non-trivial solutions.
+
+Reinforcement Learning for Lean4 Theorem Proving. There are two typical solutions to utilize RL for Lean4 Theorem Proving. In DeepSeek-Prover-v1.5-RL (Xin et al., 2024), the authors employ the GRPO algorithm and take the feedback signals from the Lean 4 verifier as a reward that reveals whether the proofs are verified as correct or wrong. Such methods only use the compilation feedback from the entire proof process as the reward result. In this paper, we employ the whole-proof generation approach, so we continue with this same solution. In contrast, Xin et al. (2025) use DPO (Rafailov et al., 2023) to refine the policy LLM by leveraging preference pairs naturally generated during tree search such as MCTS (Coulom, 2006). Therefore, the second solution utilizes the tactic state of each step during the compilation process. However, the effectiveness of existing methods still needs improvement.
+
+Cognitive Behaviors Gandhi et al. (2025) first reveal that models without integrating human-like reasoning abilities (e.g., verification, backtracking, subgoal setting and backward chaining) can hardly obtain significant improvements by directly applying RL algorithms. Similarly, Wan et al. (2025) propose that LLMs lack a specialized design for acquiring meta-thinking, resulting in low efficacy. Zeng et al. (2025) and Liu et al. (2025) propose that the training may directly start from the base models with such cognitive behaviors—a paradigm referred to as zero RL training. 
Considering the barely satisfactory performance of RL strategies in the formal reasoning field, we have reasons to suspect whether this is due to this fundamental reason. + +# 3. Model Training + +# 3.1. Continual Training + +Base Model. We begin with the previous whole-proof generation models DeepSeek-Prover v1.5-SFT (Xin et al., 2024) and Goedel-Prover (Lin et al., 2025) that are two well-trained versions after the supervised fine-tuning stage. Both of these two models are with 7 billion parameters. Specifically, the two models are trained with proofs added detailed explanatory informal comments. Therefore, the model possesses the basic ability to align natural language descriptions with Lean 4 codes. + +![](images/7359a344dc4b38409c46e9cce1237d8e32c0529d3ecee4ce0d6110ab088be7ee.jpg) +Figure 2 | Distributions of math domains in various Lean 4 dataset. Lean Workbook, Goedel-Prover, STP Lean and NuminaMath are training set. MiniF2F and ProofNet are test set. + +Statement Formalization. Similar to Lin et al. (2025), we train a formalizer, based on Qwen2.5-32B-Coder-Instruct (Hui et al., 2024), to convert the olympiad-level math problems in natural language into formal statements. We collect the formal and informal statement pairs sourced + +
Data SourceFSFS+IC+PF
Lean Workbook (Ying et al., 2024)140K40K
STP-Lean (Dong and Ma, 2025)400K36K
NuminaMath (Li et al., 2024)520K97K
AoPS (AoPS)370K26K
PromptCoT (Zhao et al., 2025)90K20K
Total1.52M0.22M
+ +Table 1 | Distribution of our training sources, including released data by existing models and synthetic data from informal math problems. FS, PF and IC refer to formal statements, proofs and informal comments, respectively. + +from Goedel-Prover $^{2}$ (around 30K). The training details of our formalizer are presented in Appendix A. With the trained formalizer, we convert the natural language statement and then verify with Lean 4 compiler. After these procedures, we finally get 520K formal statements from NuminaMath (Li et al., 2024) and 370K formal statements from AoPS (AoPS). Recently, Zhao et al. (2025) proposed to utilize LLMs to synthesize math problems through mathematical concepts. Based on the data analysis in Figure 2, we employ the PromptCoT framework to synthesize math problems at different levels, including AMC, AIME and USAMO, on three majority math domains (i.e., algebra, number theory and calculus). + +Data Curation. We develop a comprehensive Lean 4 code completion dataset for the supervised fine-tuning, as shown in Table 1. These theorems are sourced from various projects, such as the standard Lean 4 math library Mathlib4 (mathlib4, 2025), Lean Workbook (Ying et al., 2024), synthetic theorems from Goedel-Prover (Lin et al., 2025) and STP (Dong and Ma, 2025). Besides the above-mentioned public data, we use deepseek-prover and goedel-prover to sample proofs that can be correctly verified by Lean 4 compiler. In this manner, we collect Lean data from NuminaMath and AoPS. Similarly, we formalize around 90K math problems synthesized through PromptCoT. In DeepSeek-Prover-v1.5 (Xin et al., 2024), the authors claim that incorporating natural language reasoning before generating theorem proof code can eliminate the gap between problem solving strategies in natural language and theorem proving in Lean. Thus, we also collect comments for part of the data. 
In contrast, there is an obvious domain bias for ProofNet, which is also revealed by Lin et al. (2025).
An example is presented in Section B.1. + +```txt +Lean Completion Prompt (Claude) +# Initial Proof +' $\text{巧} ^ { \prime }$ lean4 +{old_code} +# Lean Feedback +{error} +# Correct Proof +' $\text{巧} ^ { \prime }$ lean4 +{new_code} +Your task is to generate a reflection of a Lean4 proof as follows: 1. You are provided with a lean proof code that failed to complete the proof, the verify feedback, and a revised correct proof. 2. You need to act as a verifier to check the code step by step and point out where the code fails with incorrect tactics. 3. Provide an alternative method, such as those in the correct proof. 4. Act as you are verifying your own proof.. Here are some rules you need to follow: 1. At the beginning, you should start with a conjunction phrase such as 'let's verify' and claim you need to verify the proof. 2. Instead of directly pointing out the issue, your answer should show the process to identify the incorrect step. 3. Do not refer to Lean Feedback, Correct Proof, or anything that shows you have already known the issue before your reflection. 4. Do not provide any new Lean4 code block, you don't need to write a correct proof. 5. Do not include a summary section. 6. Again, do not refer to Lean Feedback, Correct Proof, do not write anything like 'as shown in the correct proof'. Now, start with a conjunction phrase and require you need to check the proof, do not directly claim there is an issue. +``` + +Rewriting. Based on the above-mentioned Lean completion, there are two main steps in the rewriting strategy. First, we suspect that the generation of incorrect proofs is, to some extent, due to the incorrect problem-solving comments being generated. Therefore, we introduce Qwen2.5-72B-instruct (Team, 2024) to evaluate the problem-solving comments and then regenerate the correct problem-solving comments. 
Second, we provide Claude with both the invalid and newly rewritten valid Lean 4 code to generate comprehensive Chains of Thought (CoTs) that explain + +the reasoning process. In this manner, we collect 19K samples with CoTs (See the detailed examples in Appendix B.2). Here are the prompt templates for these two steps: + +Rewriting - Step 1 (Qwen2.5-72B-Instruct) +```txt +You are an experienced mathematics evaluation teacher. You will be provided with a math problem and the corresponding solution idea.. +Please determine whether the solution idea is correct. If it is, please output "Correct", otherwise please output "Incorrect". If the solution idea is incorrect, please provide the correct solution idea, and the output of the solution idea should be included within \*\* and \*\*. +``` + +```txt +The output format is as follows: +``` + +```txt +1. Judgement: Incorrect. Solution: “‘‘Solution idea’’” +``` + +```txt +2. Judgement: Correct. +``` + +```txt +[math problem start] +{problem} +[math problem end] +``` + +```txt +[solution idea start] +{solution} +[solution idea end] +``` + +With these synthesized data, we employ our second-stage continual training, with a learning rate of $5 \times 10^{-5}$ and overall batch size of 1024 for one epoch. Finally, we obtain the model, named as Leanabell-Prover-SFT. + +# 3.3. Reinforcement Learning + +We integrate reinforcement learning (RL) with the Lean 4 theorem prover to automate the discovery of valid proofs. The RL agent interacts with the Lean 4 environment, generating whole proofs and receiving feedback from Lean 4 compiler as reward signals. The agent's objective is to maximize cumulative rewards by learning to generate syntactically correct, logically valid proofs for an input formal statement. + +Policy Optimization Algorithms. We employ the recent GRPO (Shao et al., 2024) as our RL algorithm. 
Then, the advantage of the $i$ -th output is calculated by normalizing the group-level rewards $\{R_1, R_2, \dots, R_G\}$ : + +$$ +\hat{A}_{i, t} = \frac{R_{i} - \operatorname{mean}\left(\left\{R_{j}\right\}_{j = 1}^{G}\right)}{\operatorname{std}\left(\left\{R_{j}\right\}_{j = 1}^{G}\right)}. \tag{1} +$$ + +Finally, we optimize the policy model $\pi_{\theta}$ by maximizing the following objective:
Act as a verification assistant and carefully compare these two code snippets. +2. Identify the specific errors or flawed strategies in the first solution that caused compilation failure. +3. Explain the reasoning process that would lead someone from the incorrect approach to the correct solution. + +When analyzing the code, please simulate the thought process of someone examining their own proof. Begin sections of your analysis with phrases like "Let's verify my proof...", "Wait, I see an issue here...", or "Let me reconsider this approach..." This should demonstrate how someone might catch and correct their own mistakes. + +The analysis emphasizes conceptual understanding over syntax details, explaining the fundamental logical or strategic errors in the initial solution and demonstrating how the corrected solution properly addresses these conceptual problems. + +Please structure your response with: + +- Identification of specific errors in the first solution. +- Explanation of the conceptual issues that led to these errors. +- How to fix the conceptual problems in error so as to generate the problem-solving idea of the second solution? + +Do not provide any new Lean4 code beyond what I've given you - focus exclusively on analyzing the provided code. Don't include the phased titles in the output results, such as "Identification of Specific Errors in the First Solution", "Conceptual Issues That Led to These Errors", etc. Also, don't use expressions like "the first solution" or "the second solution". Use "current solution" to represent "first solution". Although you used the second solution for auxiliary analysis, avoid revealing in your response that you've seen its content. For example, refrain from saying things like 'I noticed that in the new solution.' Instead, respond as if you're thinking independently, based solely on the first solution. + +Reward Function. 
(2) Penalty $R_{\text{fail}}$ : a negative reward for proofs with critical errors (e.g., type mismatches, infinite loops, unsolved goals, etc.). Moreover, we observe that there are warnings in the feedback, such as unnecessary or redundant steps that have no negative effect on the final validation. In our experiments, we ignore warning cases as long as the compilation and verification process passes successfully. So, given the feedback $\tau$ from the Lean 4 compiler, our final reward function can be formulated as: + +$$ +R(\tau) = \begin{cases} R_{\text{success}} & \text{if Lean 4 fully validates } \tau \\ R_{\text{fail}} & \text{otherwise (syntax errors / timeout)} \end{cases} \tag{4} +$$
This is reasonable, as Goedel-Prover-SFT is finetuned from DeepSeek-Prover-v1.5-base, with a significantly larger amount of data compared to our continual training stage.
Within our configurations, our Leanabell-Prover-DS-SFT and Leanabell-Prover-GD-SFT models also achieve $1.8\%$ (i.e., $54.9\%$ to $56.7\%$ ) and $1.2\%$ (i.e., $58.2\%$ to $59.4\%$ ) performance gains on the same inference scaling experiments, respectively. For the RL models, DeepSeek-Prover-v1.5-RL achieves $1.6\%$ performance gains (i.e., $50.0\%$ to $51.6\%$ ), while our Leanabell-Prover-DS-RL achieves more gains (i.e., $56.6\%$ to $59.0\%$ ). Therefore, after the model has undergone SFT and RL training, our models still maintain the exploration capabilities. + +Exploration Abilities and RL. We first examine our two SFT models, with their pass@16 accuracy at different sampling temperatures. This metric can serve as an indicator of the policy's exploration ability and is particularly relevant for RL, as it reflects the policy's ability to generate responses that can achieve a positive reward. As shown in Figure 3, we find both SFT models are exploratory, and thus ready for RL. The RL training rewards are shown in Figure 4. During our experiments, we also compared the original GRPO with Dr. GRPO (Liu et al., 2025), and + +
MethodSample budgetminiF2F-test
TheoremLlama [28]12833.6%
DeepSeek-Prover-v1 [32]12846.1% ± 0.5%
DeepSeek-Prover-v1.5-Base [33]12829.7% ± 0.5%
320039.2%
640042.2%
DeepSeek-Prover-v1.5-SFT [33]3248.2% ± 0.6%
6449.6% ± 0.7%
12850.4% ± 0.4%
320053.3% ± 0.5%
DeepSeek-Prover-v1.5-RL [33]3250.0% ± 0.5%
6450.7% ± 0.4%
12851.6% ± 0.5%
320054.9% ± 0.7%
STP [7]12857.7% ± 0.6%
320061.7% ± 0.6%
Goedel-Prover-SFT [15]3257.6% ± 0.7%
320062.7%
Leanabell-Prover-DS-SFT3254.9%
6455.3%
12856.7%
Leanabell-Prover-DS-RL3256.6%
6457.4%
12859.0%
Leanabell-Prover-GD-SFT3258.2%
6459.0%
12859.4%
Leanabell-Prover-GD-RL3259.8%
6460.7%
12861.1%
+ +Table 2 | Comparison with state-of-the-art methods on the miniF2F-test dataset. The notation $\mu \pm \sigma$ denotes the average accuracy $\mu$ and the standard deviation $\sigma$ . "DS" and "GD" refer to using the DeepSeek-Prover-v1.5-SFT and Goedel-Prover-SFT as base models to continue SFT and RL training, respectively. + +![](images/01687d0709017cc46d9129128904e3099ce96b65ce6ffb05bc736ea378baf0ac.jpg) +Figure 3 | Exploration ability: pass@16 measures how well base models explore. + +![](images/7f4f27568a48a2be4320d9e2cbf50395a8c162d8e7903c6e2cea1fd88a15c8ad.jpg) +Figure 4 | Left: Reward curve during training Leanabell-Prover-Prover-DS-RL. Right: Reward curve during training Leanabell-Prover-Prover-GD-RL. + +![](images/33a464e26cdd80f93c82ec770c3dc8cb34fc16b34875ff02895ac22cb7f8d7e9.jpg) + +found that the training dynamics remained largely consistent under these two RL training algorithms. This may be attributed to the fact that the length of different rollout responses, regardless of whether they are correct or incorrect, does not vary significantly in formal language reasoning. We have also observed that selecting an appropriate prompt set is crucial for RL training. Merely using pass@N as the sole criterion is insufficient to unlock the full potential of RL. As shown in Figure 5, we analyze the distributions of error problems across different source types in the MiniF2F-test set. We observed that, based on DeepSeek-Prover-v1.5-SFT, errors can be reduced across all data source types in MiniF2F-test set, especially for AMC, MATH, and CUSTOM. However, this improvement is significantly reduced in optimization results based on Goedel-Prover-SFT. This suggests that such as the intrinsic difficulty level of the statements (e.g., whether they are at the AIME or IMO level), the coverage of mathematical domains, and the balance with the prover model's capabilities, are also important. 
We present a series of ATP models, named the Leanabell-Prover series, by investigating the posttraining scaling of current provers.
[1] Anthropic. Claude 3.7 Sonnet System card. 2025. URL https://www.anthropic.com/news/claude-3-7-sonnet.
Alphaproof and Alphageometry, July 2024. URL https://deepmind.google/discover/blog/ai-solves-imo-problems-at-silver-medal-level/.
[28] R. Wang, J. Zhang, Y. Jia, R. Pan, S. Diao, R. Pi, and T. Zhang. TheoremLlama: Transforming general-purpose llms into lean4 experts. arXiv preprint arXiv:2407.03203, 2024.
Ma-lot: Multiagent lean-based long chain-of-thought reasoning enhances formal theorem proving. arXiv preprint arXiv:2503.03205, 2025. +[30] Z. Wu, S. Huang, Z. Zhou, H. Ying, J. Wang, D. Lin, and K. Chen. Internl m2.5-Stepprover: Advancing automated theorem proving via expert iteration on large-scale lean problems. arXiv preprint arXiv:2410.15700, 2024. +[31] Z. Wu, J. Wang, D. Lin, and K. Chen. Lean-github: Compiling github lean repositories for a versatile lean prover. arXiv preprint arXiv:2407.17227, 2024. + +[32] H. Xin, D. Guo, Z. Shao, Z. Ren, Q. Zhu, B. Liu, C. Ruan, W. Li, and X. Liang. Deepseek-prover: Advancing theorem proving in llms through large-scale synthetic data. arXiv preprint arXiv:2405.14333, 2024. +[33] H. Xin, Z. Ren, J. Song, Z. Shao, W. Zhao, H. Wang, B. Liu, L. Zhang, X. Lu, Q. Du, et al. Deepseek-prover-v1. 5: Harnessing proof assistant feedback for reinforcement learning and monte-carlo tree search. arXiv preprint arXiv:2408.08152, 2024. +[34] R. Xin, C. Xi, J. Yang, F. Chen, H. Wu, X. Xiao, Y. Sun, S. Zheng, and K. Shen. Bfs-prover: Scalable best-first tree search for llm-based automatic theorem proving. arXiv preprint arXiv:2502.03438, 2025. +[35] K. Yang, A. Swope, A. Gu, R. Chalamala, P. Song, S. Yu, S. Godil, R. J. Prenger, and A. Anandkumar. Leandojo: Theorem proving with retrieval-augmented language models. Advances in Neural Information Processing Systems (NeurIPS), 2023. +[36] H. Ying, Z. Wu, Y. Geng, J. Wang, D. Lin, and K. Chen. Lean workbook: A large-scale lean problem set formalized from natural language math problems. arXiv preprint arXiv:2406.03847, 2024. +[37] W. Zeng, Y. Huang, Q. Liu, W. Liu, K. He, Z. Ma, and J. He. Simplerl-zoo: Investigating and taming zero reinforcement learning for open base models in the wild. arXiv preprint arXiv:2503.18892, 2025. +[38] X. Zhao, W. Wu, J. Guan, and L. Kong. Promptcot: Synthesizing olympiad-level problems for mathematical reasoning in large language models. 
We start with Qwen2.5-Coder-32B-Instruct (Hui et al., 2024) and use the following instruct prompt to train the formalizer:
Example 1Example 2
Informal StatementSolve for x in the given inequality: x2-2x-24<0Prove that ln(eπ) is equal to π.
Formalizer Outputtheorem lean_workbook (x : R): x^2 - 2*x - 24 < 0 ↔ x ∈ Set.Ioo (-4) 6 := by sorrytheorem lean_workbook : Real.log (Real.exp π) = π := by sorry
+ +Table 3 | Examples of formalizer inputs and outputs for two examples. + +
ModelCC Test (%)FC Test (%)
Formalizer A (Lin et al., 2025)76.748.1
Formalizer B (Lin et al., 2025)88.580.4
Ours Formalizer77.649.0
+ +Table 4 | Quality assessment of the formalized statement. "CC" refers to Compiling Correctness (CC) Test and "FC" refers to Faithfulness and Completeness (FC) Test. + +# B. Cognitive Behaviors Design + +# B.1. Lean Completion Example + +Input: +```txt +Complete the following Lean 4 code with explanatory comments preceding each line of code: +' \(\mathit{lean4}\) +import Mathlib +import Aesop +set_option maxHeartbeats O +open BigOperators Real Nat Topology Rat +/\~ Given \(\sin (\frac{\pi}{4} -x) = \frac{3}{5}\) , find the value of sin2x. Show that it is \(\backslash\) frac{7}{25}. - / theorem sin_2x_value (x : R) (h : sin (Real.pi / 4 - x) = 3 / 5) : sin (2 * x) \(= 7 / 25\coloneqq\) by +/\~ Given \(\backslash (\backslash \sin \backslash \text{left} (\backslash \text{frac} (\backslash \text{pi})\{4\} -\text{x}\backslash \text{right}) = \backslash \text{frac} (3)\{5\} \backslash)\) , we need to find the value of \(\backslash (\backslash \sin 2x\backslash)\) . We start by using the trigonometric identity for the sine of a difference and the double-angle formula for sine. Specifically, we use the identity \(\backslash (\backslash \sin (a - b) = \backslash \sin a\backslash \cos b - \backslash \cos a\backslash \sin b\backslash)\) and the double-angle formula \(\backslash (\backslash \sin 2x = 2\backslash \sin x\backslash \cos x\backslash)\) . 1. Substitute \(\backslash (\backslash \sin \backslash \text{left} (\backslash \text{frac} (\backslash \text{pi})\{4\} -\text{x}\backslash \text{right}) = \backslash \text{frac} (3)\{5\} \backslash)\) into the identity for the sine of a difference: [ \\sin \(\backslash\) left(\frac{\frac{\pi}{4}}{4}\)-x\right) \(=\) \\sin \(\backslash\) left(\frac{\frac{\pi}{4}}{4}\)-x\right) \(=\) \\sin \(\backslash\) left(a-b) \(=\) \\sin a\cos b- cos a\sin b) and the double-angle formula \(\backslash (\backslash \sin 2x = 2\backslash \sin x\backslash \cos x\backslash)\) +] +2. 
Calculate \(\backslash (\backslash \sin \backslash \text{left} (\backslash \text{frac} (\backslash \text{pi})\{4\} -\text{x}\backslash \text{right}) = \backslash \text{frac} (3)\{5\} \backslash)\) into \\left(\frac{\frac{\pi}{4}}{4}\)-x\right) and \\( (\cos \\left(\frac{\frac{\pi}{4}}{4}\)-x\right)\\left(\frac{\frac{\pi}{4}}{4}\)-x\right)\\left(\frac{\frac{\pi}{4}}{4}\)-x\right)\\left(\frac{\frac{\pi}{4}}{4}\)-x\right)\\left(\frac{\frac{\pi}{4}}{4}\)-x\right)\\left(\frac{\frac{\pi}{4}}{4}\)-x\right)\\left(\frac{\pi}{4}\)-x\right)\\left(\frac{\pi}{4}\)-x\right)\\left(\frac{\pi}{4}\)-x\right)\\left(\frac{\pi}{4}\)-x\right)\\left(\frac{\pi}{4}\)-x\right)\\left(\frac{\pi}{4}\)-x\right)\\left(\frac{\pi}{4}\)-x\right)\\left(\frac{\pi}{4} -x\right)\\left(\frac{\pi}{4} -x\right)\\left(\frac{\pi}{4} -x\right)\\left(\frac{\pi}{4} -x\right)\\left(\frac{\pi}{4} -x\right)\\left(\frac{\pi}{4} -x\right)\\left(\frac{\pi}{4} -x\right)\\left(\frac{\pi}{4} -x +``` + +```latex +\sin 2x = 2 \sin \left(\text {left} (\text {x} \text {right}) \cos \left(\text {left} (\text {x} \text {right}) \right.\right) +\] +7. 
Substitute $\backslash (\backslash \cos \backslash \text {left} (\text {x} \backslash \text {right}) - \backslash \sin \backslash \text {left} (\text {x} \backslash \text {right}) = \backslash \operatorname{frac}\{3$ $\backslash \operatorname{sqrt}\{2\} \} \{5\} \backslash)$ into the equation: +\[ +\begin{array}{l} +\backslash [ \\ +\sin 2x = 2 \backslash \sin \backslash \text {left} (\text {x} \backslash \text {right}) \cos \backslash \text {left} (\text {x} \backslash \text {right}) = 2 \backslash \operatorname{left} (\backslash \operatorname{frac}\{3 +\} \backslash \operatorname{sqrt}\{2\}) \{5\} \backslash \text {right}) \backslash \operatorname{left} (\backslash \operatorname{frac}\{\backslash \operatorname{sqrt}\{2\}\} \{2\} \backslash \text {right}) = 2 \backslash \operatorname{cdot} \backslash \operatorname{frac}\{3 +\} \backslash \operatorname{sqrt}\{2\}) \{5\} \backslash \operatorname{cdot} \backslash \operatorname{frac}\{\backslash \operatorname{sqrt}\{2\}\} \{2\} = \backslash \operatorname{frac}\{3 \backslash \operatorname{sqrt}\{2\}\} \{5\} \backslash \operatorname{cdot} +\end{array} +\] + $\backslash$ +8. Simplify the final expression: +\[ +\begin{array}{l} +\backslash [ \\ +\sin 2x = \backslash \operatorname{frac}\{7\} \{25\} +\end{array} +\] +``` + +Wrong Proof: +```txt +-- Use the trigonometric identity for the sine of a difference and the double-angle formula for sine. +simp [Real.sin_sub, Real.sin_two.mul, Real.cos_two.mul, Real.sin(pi_div_four, Real.cos(pi_div_four] at h $\vdash$ +-- Simplify the expression using field operations. +field_simp at h $\vdash$ +-- Normalize the expression by expanding and simplifying it. +ring_nf at h $\vdash$ +-- Use non-linear arithmetic to verify the final result. 
+nlinarith [Real.sin_sq_add_cos_sq x, Real.sin_le_one x, Real.cos_le_one x] +``` + +Tactic Error Messages: +```txt +linarith failed to find a contradiction +case h1.h +x: $\mathbb{R}$ +h: $\sqrt{2} * x.\cos * 5 - \sqrt{2} * x.\sin * 5 = 6$ +at: x.cos * x.sin * 50 < 7 + $\vdash$ False +failed +``` + +Searched Valid Proof: +```sql +-- Use the trigonometric identity for the sine of a difference and the double-angle formula for sine. +simp [Real.sin_sub, Real.sin_two.mul, Real.cos_two.mul, Real.sin(pi_div_four, Real.cos(pi_div_four] at h $\vdash$ +-- Simplify the expression using field operations. +field_simp at h $\vdash$ +-- Normalize the expression by expanding and simplifying it. +ring_nf at h $\vdash$ +-- Use the Pythagorean identity to simplify further. +nlinarith [Real.sin_sq_add_cos_sq x, Real.sqrt_nonneg 2, sq_sqrt (show $0 \leq$ 2 by norm_num)]" +``` + +Claude CoTs: +```txt + +``` + +let's verify this proof to make sure it's working correctly. I need to check each step of the proof to ensure it's valid and identify any potential issues. + +First, let's examine what we're trying to prove. It appears to be a trigonometric identity involving sine and cosine functions. + +The proof begins with: + +simp [Real.sin_sub, Real.sin_two.mul, Real.cos_two.mul, Real.sin(pi_div_four, Real.cos(pi_div_four] at h $\vdash$ + +This step seems reasonable - we're using standard trigonometric identities to simplify both the hypothesis $h$ and the goal. + +Next, we have: + +field_simp at h + +This tactic simplifies expressions involving field operations, which is appropriate for real-valued trigonometric expressions. + +Then: + +ring_nf at h + +This normalizes ring expressions, which should help simplify our algebraic expressions further. + +Finally, we have: + +nlinarith [Real.sin_sq_add_cos_sq x, Real.sin_le_one x, Real.cos_le_one x] + +This is where I need to be careful. 
The 'nlinarith' tactic is trying to use non-linear arithmetic with the Pythagorean identity $\sin^2 (x) + \cos^2 (x) = 1'$ and the bounds on sine and cosine functions. + +The issue might be that we're using bounds ('Real.sin_le_one x, Real.cos_le_one x') rather than equalities. When dealing with trigonometric identities, we often need exact relationships rather than just bounds. The Pythagorean identity is an equality, but we're also including inequalities in our reasoning. + +A better approach would be to focus on using the Pythagorean identity directly, possibly with additional facts about square roots. The `sqrt(2)` in our expressions suggests we might need properties about square roots, such as `sq_sqrt` to handle expressions like `(\sqrt{2})^{\sim}`. + +Additionally, we should ensure that we're providing all the necessary facts to the arithmetic solver. For instance, we might need to explicitly include that '\sqrt{2} \geq 0'\), which could be done with 'Real.sqrt_nonneg \(2'\). + +# B.2. Rewriting Example + +Input: +```txt +Complete the following Lean 4 code with explanatory comments preceding each line of code: +``` +``` +```lean4 +import Mathlib +import Aesop +set_option maxHeartbeats 0 +open BigOperators Real Nat Topology Rat +-- Prove that if $n \geq 2$ , then $n$ distinct integers can be given such that the sum of any two of them is divisible by their difference.-/ +theorem lean_workbook : ∀ n : N, 2 ≤ n → ∃ l : List Z, l.length = n ∧ ∀ x y, x ∈ l ∧ y ∈ l ∧ x ≠ y → (x + y) % (x - y) = 0 := by +``` + +Wrong Proof: +```latex +- We need to prove that for any natural number $(n \backslash \text{geq} 2)$ , there exists a list of $(n \backslash)$ distinct integers such that the sum of any two integers in the list is divisible by their difference. +1. **Base Case**: When $(n = 2)$ , we can choose the list $([0, 1])$ . For any $(x \backslash)$ and $(y \backslash)$ in this list, if $(x \backslash \text{neq} y)$ , then $(x + y) \mod (x - y) = 0$ . +2. 
**Inductive Step**: Assume the statement holds for some $(n \backslash \text{geq} 2)$ . We need to show it holds for $(n + 1)$ . By the inductive hypothesis, there exists a list of $(n \backslash)$ distinct integers $(1 \backslash)$ such that the sum of any two elements in $(1 \backslash)$ is divisible by their difference. We can add one more integer to this list and show that the new list still satisfies the condition. +- / +intro n hn +-- Use a base case for $n = 2$ +use [0, 1] +-- Verify that the base case satisfies the conditions +simp +-- Use Aesop to handle the general case +<;> aesop +``` + +Revised Comments and Proofs: +```txt +/\-We need to prove that for any natural number \(\backslash\) (n\geq 2 \), there exists a list of \(\backslash\) (n \) distinct integers such that the sum of any two integers in the list is divisible by their difference. +1. \*\*Base Case**: When \(\backslash\) ( \(n = 2\backslash\) ), we can choose the list \(\backslash ([0,2]\backslash)\). For any \(\backslash\) (x \) and \(\backslash\) y \) in this list, if \(\backslash\) (x\neq y \), then \(\backslash\) (x + y) mod (x - y) = 0 \). For example, \(\backslash\) (0 + 2) mod (0 - 2) = 2 \mod -2 = 0 \). +2. \*\*Inductive Step**: Assume the statement holds for some \(\backslash\) (n\geq 2 \). We need to show it holds for \(\backslash\) (n + 1 \). By the inductive hypothesis, there +``` + +```txt +exists a list of \(\backslash\) (n \(\backslash\)) distinct integers \(\backslash\) (\{a_1, a_2, \ldots, a_n\} \}) such that the sum of any two elements in this list is divisible by their difference. We can add one more integer to this list to form a new list of \(\backslash\) (n + 1 \(\backslash\)) integers. A good choice for the new integer is \(\backslash\) (a_n+1 \() =\) a_n + (n+1)! \)). 
This ensures that for any \(\backslash\) (i \(\backslash\)) and \(\backslash\) (j \(\backslash\)) in the range \(\backslash\) (1 \(\backslash\) leq i, j \(\backslash\) leq n+1 \(\backslash\)) with \(\backslash\) (i \(\backslash\) neq j \(\backslash\)), the sum \(\backslash\) (a_i + a_j \(\backslash\)) is divisible by their difference \(\backslash\) (a_i - a_j \(\backslash\)). This is because \(\backslash\) (a_i + a_j) - (a_i - a_j) = 2a_j \), and since \(\backslash\) (a_j \(\backslash\)) is an integer, \(\backslash\) (2a_j \(\backslash\)) is always divisible by \(\backslash\) (a_i - a_j \)). Thus, the list \(\backslash\) (\{a_1, a_2, \ldots, a_ldots, a_n, a_{\{n+1\}}\} \}) satisfies the condition, completing the inductive step. +- +- Introduce the natural number \(n\) and the assumption that \(n \geq 2\) +intro n hn +- Use a construction to construct the list of integers +use List.replicate n 0 +- Simplify the conditions given the assumption \(n \geq 2\) +simp [hn, List.length, List.mem_replicate, List.mem_range, List.mem_filter] +- Use Aesop to complete the proof +<;> aesop" +``` + +# Claude CoTs: + +Let me verify my proof for this problem about distinct integers with divisibility properties... + +Looking at my current solution, I'm trying to prove that for any $n \geq 2$ , there exist $n$ distinct integers where the sum of any two is divisible by their difference. + +Wait, I see an issue here. I've used '[0, 1]' as my example list, but let me check if this actually satisfies the condition: + +- For $x = 0$ and $y = 1$ : $(x + y) \% (x - y) = (0 + 1) \% (0 - 1) = 1 \% (-1) = 0$ ? + +Let me verify this calculation. We have $1\%$ (-1) which should be the remainder when 1 is divided by -1. Since -1 divides 1 as $-1 \times (-1) = 1$ , the remainder is 0. So this seems correct. + +But wait, there's another issue. I've only constructed a list for $n = 2$ , but my proof needs to work for any $n \geq 2$ . 
The line: + +use [0, 1] + +only provides a list of length 2, but I need to handle cases where $n > 2$ as well. + +I need to generalize my construction. Perhaps I should create a list of length $n$ instead of just assuming the base case will handle everything. The 'simp' and 'aesop' tactics might not be able to automatically construct this list for arbitrary $n$ . + +Let me reconsider this approach... What if I use a more general construction? I could use 'List.replicate n 0' to create a list of $\mathbf{n}$ zeros. But then all elements would be the same, and the problem requires distinct integers. + +Actually, I should think more carefully about what list would satisfy the divisibility condition. If I have integers $a$ and $b$ where $a \neq b$ , then I need $(a + b) \% (a - b) = 0$ . + +For the general case, I might need to use a more sophisticated construction like arithmetic sequences or something based on factorials to ensure the divisibility property holds. + +The issue with my current proof is that I'm not providing a general construction for $n$ elements - I'm only handling $n = 2$ . And even if 'aesop' could somehow extend this, I haven't given it enough guidance on how to construct the larger list. + +To fix this, I should: + +1. Either provide an explicit construction for arbitrary $n$ +2. Or use induction properly, showing how to extend from $n$ to $n + 1$ elements + +I think I need to reconsider what list would work for this problem. Maybe something like powers of 2, or a list where consecutive elements have a specific relationship? The factorial approach mentioned in my comment might be promising - adding elements like $\mathbf{a}_1$ , $\mathbf{a}_2$ , ..., $\mathbf{a}_n$ where each new element is constructed to maintain the divisibility relationship with all previous elements. + +I should try constructing a list like $[0, 1!, 2!, \ldots, (n-1)!!]$ or something similar and verify that it satisfies the divisibility condition. 
This would give me distinct integers with potentially useful divisibility properties. + +# C. RL Details + +Figure 6 shows the overall distribution of valid proofs in pass@32 testing. We collect the distribution base on our Leanabell-Prover-GD-SFT model. + +![](images/777becb88cf144ee0b188a5eb4a011a795525f9e8a7316c04c0b6494231fdd17.jpg) +Figure 6 | Distribution of valid proofs in pass@32. We calculate this distribution based on the Leanabell-Prover-GD-SFT model. To build this set, we remain the formal statements who has at least sampled proofs are valid by Lean 4 compiler. \ No newline at end of file diff --git a/data/2025/2504_06xxx/2504.06122/images/01687d0709017cc46d9129128904e3099ce96b65ce6ffb05bc736ea378baf0ac.jpg b/data/2025/2504_06xxx/2504.06122/images/01687d0709017cc46d9129128904e3099ce96b65ce6ffb05bc736ea378baf0ac.jpg new file mode 100644 index 0000000000000000000000000000000000000000..09030214d24e8ecc21d8869076ab01c8b6c0174a --- /dev/null +++ b/data/2025/2504_06xxx/2504.06122/images/01687d0709017cc46d9129128904e3099ce96b65ce6ffb05bc736ea378baf0ac.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e31b500d1d72db20a1f3b8e56b7a13f1e703ee2049c314f2351093463299a1ff +size 54241 diff --git a/data/2025/2504_06xxx/2504.06122/images/24634dc2af6e2b833e2bb966dcb1e44f788c6180dc74eaacb30af951688f8d44.jpg b/data/2025/2504_06xxx/2504.06122/images/24634dc2af6e2b833e2bb966dcb1e44f788c6180dc74eaacb30af951688f8d44.jpg new file mode 100644 index 0000000000000000000000000000000000000000..e205ad0c43d1a0d87c46808b9cac746a5ea9dc49 --- /dev/null +++ b/data/2025/2504_06xxx/2504.06122/images/24634dc2af6e2b833e2bb966dcb1e44f788c6180dc74eaacb30af951688f8d44.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:601d1430c49a4d4dc09aaeb2abb96664ef1d625acb7363675e9ebcf8f6526b2a +size 13177 diff --git a/data/2025/2504_06xxx/2504.06122/images/2fc90dfe1c7eb7a35b8a8ef67fc0f69ff7ff4653848471c031d9155572184687.jpg 
b/data/2025/2504_06xxx/2504.06122/images/2fc90dfe1c7eb7a35b8a8ef67fc0f69ff7ff4653848471c031d9155572184687.jpg new file mode 100644 index 0000000000000000000000000000000000000000..fc0d836212e33071252890e5c1bdfeb18846f30b --- /dev/null +++ b/data/2025/2504_06xxx/2504.06122/images/2fc90dfe1c7eb7a35b8a8ef67fc0f69ff7ff4653848471c031d9155572184687.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8b3cc10060ea8e05741d01e15c8597ca89c8fb98391cfd71aaca89a6d0a43ccb +size 134005 diff --git a/data/2025/2504_06xxx/2504.06122/images/33a464e26cdd80f93c82ec770c3dc8cb34fc16b34875ff02895ac22cb7f8d7e9.jpg b/data/2025/2504_06xxx/2504.06122/images/33a464e26cdd80f93c82ec770c3dc8cb34fc16b34875ff02895ac22cb7f8d7e9.jpg new file mode 100644 index 0000000000000000000000000000000000000000..a4cb058af1bbb869575d72d6386cf680614dcfa9 --- /dev/null +++ b/data/2025/2504_06xxx/2504.06122/images/33a464e26cdd80f93c82ec770c3dc8cb34fc16b34875ff02895ac22cb7f8d7e9.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7f7bb03d47fff6f07a5216246be04fc88213903aac1cb167073136c97b77cd9a +size 38141 diff --git a/data/2025/2504_06xxx/2504.06122/images/35e63804bae7aac8349c0715b1dcd16bb5dda356dba66e77102f715225f4ac8d.jpg b/data/2025/2504_06xxx/2504.06122/images/35e63804bae7aac8349c0715b1dcd16bb5dda356dba66e77102f715225f4ac8d.jpg new file mode 100644 index 0000000000000000000000000000000000000000..748aca99dd1f345ae110db1a193f543df4186bd1 --- /dev/null +++ b/data/2025/2504_06xxx/2504.06122/images/35e63804bae7aac8349c0715b1dcd16bb5dda356dba66e77102f715225f4ac8d.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e29e8851a1169d34160db21d5be912b83a4b87370dbfce0f2de2fbc8d87bc06a +size 23186 diff --git a/data/2025/2504_06xxx/2504.06122/images/35f27ac621c7756bfb48ee9ba8225e75a714436d403dd6520c877e628634c3f7.jpg b/data/2025/2504_06xxx/2504.06122/images/35f27ac621c7756bfb48ee9ba8225e75a714436d403dd6520c877e628634c3f7.jpg new file mode 100644 index 
0000000000000000000000000000000000000000..3795c4eec1f48b1ee6783fac0e88a1c41f8db457 --- /dev/null +++ b/data/2025/2504_06xxx/2504.06122/images/35f27ac621c7756bfb48ee9ba8225e75a714436d403dd6520c877e628634c3f7.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2dd31f81ca889aa4eb60fb38ed2ac79b4e1afac3a51302c563649ba10e2cd962 +size 18142 diff --git a/data/2025/2504_06xxx/2504.06122/images/3d92cbecdced18c0f702bb3e88a457e85c4a980d84f653daaa42c640ec2b117d.jpg b/data/2025/2504_06xxx/2504.06122/images/3d92cbecdced18c0f702bb3e88a457e85c4a980d84f653daaa42c640ec2b117d.jpg new file mode 100644 index 0000000000000000000000000000000000000000..929366304281972d6eba4429139b77f5a4b4b47c --- /dev/null +++ b/data/2025/2504_06xxx/2504.06122/images/3d92cbecdced18c0f702bb3e88a457e85c4a980d84f653daaa42c640ec2b117d.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6ab4544c132e86d00231e27d827be757173d7e1813582ba14953e531ef0f45e6 +size 27287 diff --git a/data/2025/2504_06xxx/2504.06122/images/7359a344dc4b38409c46e9cce1237d8e32c0529d3ecee4ce0d6110ab088be7ee.jpg b/data/2025/2504_06xxx/2504.06122/images/7359a344dc4b38409c46e9cce1237d8e32c0529d3ecee4ce0d6110ab088be7ee.jpg new file mode 100644 index 0000000000000000000000000000000000000000..9cda7025d0da1f95b0d4333532b9ad13a51cb6f4 --- /dev/null +++ b/data/2025/2504_06xxx/2504.06122/images/7359a344dc4b38409c46e9cce1237d8e32c0529d3ecee4ce0d6110ab088be7ee.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:54fb3eec9e2ba1a3bfe4893fe1b0728d8bc98932bb1bfe6945359f9f66bbe8c6 +size 104050 diff --git a/data/2025/2504_06xxx/2504.06122/images/777becb88cf144ee0b188a5eb4a011a795525f9e8a7316c04c0b6494231fdd17.jpg b/data/2025/2504_06xxx/2504.06122/images/777becb88cf144ee0b188a5eb4a011a795525f9e8a7316c04c0b6494231fdd17.jpg new file mode 100644 index 0000000000000000000000000000000000000000..29540cbe301064ae8bbc411c853eddbdb2cc7454 --- /dev/null +++ 
b/data/2025/2504_06xxx/2504.06122/images/777becb88cf144ee0b188a5eb4a011a795525f9e8a7316c04c0b6494231fdd17.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2761f668460acdc0ab0b82531335217a77ad123694b07f8dd33cc7e437c313af +size 81068 diff --git a/data/2025/2504_06xxx/2504.06122/images/7f4f27568a48a2be4320d9e2cbf50395a8c162d8e7903c6e2cea1fd88a15c8ad.jpg b/data/2025/2504_06xxx/2504.06122/images/7f4f27568a48a2be4320d9e2cbf50395a8c162d8e7903c6e2cea1fd88a15c8ad.jpg new file mode 100644 index 0000000000000000000000000000000000000000..a076fc68ea40143b22c5408bd8ad80823adb88c1 --- /dev/null +++ b/data/2025/2504_06xxx/2504.06122/images/7f4f27568a48a2be4320d9e2cbf50395a8c162d8e7903c6e2cea1fd88a15c8ad.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:95cb5b50a3aa358faf9be750c407be555a35122f3883f2e5b65bb67e625d7961 +size 37673 diff --git a/data/2025/2504_06xxx/2504.06122/images/81ecbadfa92856e0289f8658a20571661770530cab22ef47491919d27bd74bce.jpg b/data/2025/2504_06xxx/2504.06122/images/81ecbadfa92856e0289f8658a20571661770530cab22ef47491919d27bd74bce.jpg new file mode 100644 index 0000000000000000000000000000000000000000..0169cb7c390a02821e109e4b5f50f00b9dabc073 --- /dev/null +++ b/data/2025/2504_06xxx/2504.06122/images/81ecbadfa92856e0289f8658a20571661770530cab22ef47491919d27bd74bce.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2d38acdd4eebe2cf495afbaadc2d3eb9143a721d76945ac9e3d5fd37d8ca122b +size 57608 diff --git a/data/2025/2504_06xxx/2504.06122/images/b7cc8430f6be44158dab40da3bc8875fd37fa636e1efd34a5b31f9efe81e607d.jpg b/data/2025/2504_06xxx/2504.06122/images/b7cc8430f6be44158dab40da3bc8875fd37fa636e1efd34a5b31f9efe81e607d.jpg new file mode 100644 index 0000000000000000000000000000000000000000..9c42457afd7c8779537c5587cd1ec0f4dcfcea54 --- /dev/null +++ b/data/2025/2504_06xxx/2504.06122/images/b7cc8430f6be44158dab40da3bc8875fd37fa636e1efd34a5b31f9efe81e607d.jpg @@ -0,0 +1,3 @@ +version 
https://git-lfs.github.com/spec/v1 +oid sha256:2d48a060d3d8bd1dd9b3a704dd2c9475086fac8b53eb382e65503f1ba85b20e3 +size 47205 diff --git a/data/2025/2504_06xxx/2504.06122/images/c7758c7a762f4643a6567b155f612a84f49906540bd173b97b882308eb809395.jpg b/data/2025/2504_06xxx/2504.06122/images/c7758c7a762f4643a6567b155f612a84f49906540bd173b97b882308eb809395.jpg new file mode 100644 index 0000000000000000000000000000000000000000..139e79e3cad81578cb072dd6669e60c236040e5f --- /dev/null +++ b/data/2025/2504_06xxx/2504.06122/images/c7758c7a762f4643a6567b155f612a84f49906540bd173b97b882308eb809395.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:3d4c1a03f5cefa5440d0d5b2a544649196a3206cd5867e13109c562d3ad8bd3b +size 21808 diff --git a/data/2025/2504_06xxx/2504.06122/images/d54d3dc99f0f5608177788c79a253bd2e0a99094c8f633e30787e913f16ecf88.jpg b/data/2025/2504_06xxx/2504.06122/images/d54d3dc99f0f5608177788c79a253bd2e0a99094c8f633e30787e913f16ecf88.jpg new file mode 100644 index 0000000000000000000000000000000000000000..0a6fbd7c7966a9dcfdbb7856f22321fdbe9aa98c --- /dev/null +++ b/data/2025/2504_06xxx/2504.06122/images/d54d3dc99f0f5608177788c79a253bd2e0a99094c8f633e30787e913f16ecf88.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:494bcde1faa4ffcf9d4a5034414d6d0982ad7ef2d65faa47e03839d4efc60ae8 +size 48070 diff --git a/data/2025/2504_06xxx/2504.06122/images/dfe665c5c33438b54706ae078adfaee87f1757e2977bb0cecd6b6df134887d2b.jpg b/data/2025/2504_06xxx/2504.06122/images/dfe665c5c33438b54706ae078adfaee87f1757e2977bb0cecd6b6df134887d2b.jpg new file mode 100644 index 0000000000000000000000000000000000000000..f3f0cf6d70a77f627c14dfe713d60a903779c74b --- /dev/null +++ b/data/2025/2504_06xxx/2504.06122/images/dfe665c5c33438b54706ae078adfaee87f1757e2977bb0cecd6b6df134887d2b.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:95b39a48b9a7b130b7e4b6637db04028d92cab3b6554ca2a63e8992df5960a2e +size 7503 diff --git 
a/data/2025/2504_06xxx/2504.06122/images/f5772f681c658e0fdb28bb62d8059253c173aeed7968ce49052268e5a2e696b7.jpg b/data/2025/2504_06xxx/2504.06122/images/f5772f681c658e0fdb28bb62d8059253c173aeed7968ce49052268e5a2e696b7.jpg new file mode 100644 index 0000000000000000000000000000000000000000..b1a28013b8d092590716cf0206618f94b68ec097 --- /dev/null +++ b/data/2025/2504_06xxx/2504.06122/images/f5772f681c658e0fdb28bb62d8059253c173aeed7968ce49052268e5a2e696b7.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4c3b87093050951cc0d97017f02b333ad7a460186f22d9c17bbc524b5991d577 +size 7572 diff --git a/data/2025/2504_06xxx/2504.06122/layout.json b/data/2025/2504_06xxx/2504.06122/layout.json new file mode 100644 index 0000000000000000000000000000000000000000..8a334e3aff97344f91eb6b47f4ebc6b1fe0e4dae --- /dev/null +++ b/data/2025/2504_06xxx/2504.06122/layout.json @@ -0,0 +1,11059 @@ +{ + "pdf_info": [ + { + "para_blocks": [ + { + "bbox": [ + 77, + 112, + 517, + 133 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 77, + 112, + 517, + 133 + ], + "spans": [ + { + "bbox": [ + 77, + 112, + 517, + 133 + ], + "type": "text", + "content": "Leanabell-Prover: Posttraining Scaling in Formal Reasoning" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 131, + 158, + 462, + 188 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 131, + 158, + 462, + 188 + ], + "spans": [ + { + "bbox": [ + 131, + 158, + 462, + 188 + ], + "type": "text", + "content": "Jingyuan Zhang, Qi Wang, Xingguang Ji, Yahui Liu, Yang Yue, Fuzheng Zhang, Di Zhang, Guorui Zhou, Kun Gai" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 242, + 200, + 352, + 214 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 242, + 200, + 352, + 214 + ], + "spans": [ + { + "bbox": [ + 242, + 200, + 352, + 214 + ], + "type": "text", + "content": "Kuaishou Technology" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 267, + 249, + 327, + 264 + ], + "type": 
"title", + "angle": 0, + "lines": [ + { + "bbox": [ + 267, + 249, + 327, + 264 + ], + "spans": [ + { + "bbox": [ + 267, + 249, + 327, + 264 + ], + "type": "text", + "content": "Abstract" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 66, + 285, + 527, + 472 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 66, + 285, + 527, + 472 + ], + "spans": [ + { + "bbox": [ + 66, + 285, + 527, + 472 + ], + "type": "text", + "content": "Recent advances in automated theorem proving (ATP) through LLMs have highlighted the potential of formal reasoning with Lean 4 codes. However, ATP has not yet been revolutionized by the recent posttraining scaling as demonstrated by Open AI O1/O3 and Deepseek R1. In this work, we investigate the entire posttraining of ATP, aiming to align it with breakthroughs in reasoning models in natural languages. To begin, we continual train current ATP models with a hybrid dataset, which consists of numerous statement-proof pairs, and additional data aimed at incorporating cognitive behaviors that emulate human reasoning and hypothesis refinement. Next, we explore reinforcement learning with the use of outcome reward returned by Lean 4 compiler. Through our designed continual training and reinforcement learning processes, we have successfully improved existing formal provers, including both DeepSeek-Prover-v1.5 and Goedel-Prover, achieving state-of-the-art performance in the field of whole-proof generation. For example, we achieve a " + }, + { + "bbox": [ + 66, + 285, + 527, + 472 + ], + "type": "inline_equation", + "content": "59.8\\%" + }, + { + "bbox": [ + 66, + 285, + 527, + 472 + ], + "type": "text", + "content": " pass rate (pass@32) on MiniF2F. This is an on-going project and we will progressively update our findings, release our data and training details." 
+ } + ] + } + ], + "index": 5 + }, + { + "type": "image", + "bbox": [ + 140, + 487, + 452, + 661 + ], + "blocks": [ + { + "bbox": [ + 140, + 487, + 452, + 661 + ], + "lines": [ + { + "bbox": [ + 140, + 487, + 452, + 661 + ], + "spans": [ + { + "bbox": [ + 140, + 487, + 452, + 661 + ], + "type": "image", + "image_path": "d54d3dc99f0f5608177788c79a253bd2e0a99094c8f633e30787e913f16ecf88.jpg" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 66, + 674, + 526, + 730 + ], + "lines": [ + { + "bbox": [ + 66, + 674, + 526, + 730 + ], + "spans": [ + { + "bbox": [ + 66, + 674, + 526, + 730 + ], + "type": "text", + "content": "Figure 1 | Benchmark performance on MiniF2F-test (Zheng et al., 2021). Our method boosts both the two baseline models after employing RL training. Goedel-Prover-RL is our implementation. Our framework surpasses DeepSeek-Prover-v1.5-RL and Goedel-Prover-SFT " + }, + { + "bbox": [ + 66, + 674, + 526, + 730 + ], + "type": "inline_equation", + "content": "6.6\\%" + }, + { + "bbox": [ + 66, + 674, + 526, + 730 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 66, + 674, + 526, + 730 + ], + "type": "inline_equation", + "content": "2.2\\%" + }, + { + "bbox": [ + 66, + 674, + 526, + 730 + ], + "type": "text", + "content": ", respectively." 
+ } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_caption" + } + ], + "index": 6 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 13, + 250, + 36, + 590 + ], + "type": "aside_text", + "angle": 270, + "lines": [ + { + "bbox": [ + 13, + 250, + 36, + 590 + ], + "spans": [ + { + "bbox": [ + 13, + 250, + 36, + 590 + ], + "type": "text", + "content": "arXiv:2504.06122v3 [cs.AI] 14 Jul 2025" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 67, + 774, + 483, + 790 + ], + "type": "footer", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 774, + 483, + 790 + ], + "spans": [ + { + "bbox": [ + 67, + 774, + 483, + 790 + ], + "type": "inline_equation", + "content": "^{\\text{念}}" + }, + { + "bbox": [ + 67, + 774, + 483, + 790 + ], + "type": "text", + "content": "Equal contributions, and order alphabetically by first name. " + }, + { + "bbox": [ + 67, + 774, + 483, + 790 + ], + "type": "inline_equation", + "content": "{}^{\\dagger}" + }, + { + "bbox": [ + 67, + 774, + 483, + 790 + ], + "type": "text", + "content": "Corresponding author." + } + ] + } + ], + "index": 8 + } + ], + "page_size": [ + 595, + 841 + ], + "page_idx": 0 + }, + { + "para_blocks": [ + { + "bbox": [ + 69, + 84, + 162, + 98 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 84, + 162, + 98 + ], + "spans": [ + { + "bbox": [ + 69, + 84, + 162, + 98 + ], + "type": "text", + "content": "1. Introduction" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 69, + 111, + 526, + 246 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 111, + 526, + 246 + ], + "spans": [ + { + "bbox": [ + 69, + 111, + 526, + 246 + ], + "type": "text", + "content": "Recent large language models (LLMs), such as Open AI O1/O3 and Deepseek R1, which are enhanced by posttraining scaling, emerge with numerous powerful and intriguing reasoning behaviors (Guo et al., 2025; Anthropic, 2025; Team, 2025). 
Such LLMs have shown impressive performance in solving math problems with natural language. However, the long chain-of-thoughts (CoTs) and final answers in natural language (NL) are substantially challenging for peer review (Wang et al., 2024), especially for theorem proving. Meanwhile, the key advantage of formal languages lies in their verifiability—each reasoning step can be validated by formal theorem verifiers, e.g., Lean (De Moura et al., 2015; Moura and Ullrich, 2021) and Isabelle (Paulson, 1994). As a promising direction, automated theorem proving (ATP) with formal languages (FL) has attracted booming attention from the community of large language models (LLMs)." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 69, + 253, + 526, + 387 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 253, + 526, + 387 + ], + "spans": [ + { + "bbox": [ + 69, + 253, + 526, + 387 + ], + "type": "text", + "content": "Contrary to solving math problems with natural language, generating proofs using Lean 4 codes (or other formal languages) is more challenging. For example, DeepSeek-Prover v1.5-RL (Xin et al., 2024) achieves only " + }, + { + "bbox": [ + 69, + 253, + 526, + 387 + ], + "type": "inline_equation", + "content": "50.0\\%" + }, + { + "bbox": [ + 69, + 253, + 526, + 387 + ], + "type": "text", + "content": " (pass@32) on the Olympiad-level mathematics benchmark MiniF2F (Zheng et al., 2021). However, DeepSeek-R1 (Guo et al., 2025) can achieve " + }, + { + "bbox": [ + 69, + 253, + 526, + 387 + ], + "type": "inline_equation", + "content": "100\\%" + }, + { + "bbox": [ + 69, + 253, + 526, + 387 + ], + "type": "text", + "content": " on the same math problems, but in natural language. DeepSeek-Prover-v1.5 (Lin et al., 2025) and STP (Dong and Ma, 2025) show that using extensive synthetic dataset of formal statements and expert iteration (Polu et al., 2022) can boost the whole-proof prover. 
Meanwhile, some methods (Yang et al., 2023; Wu et al., 2024; Xin et al., 2025) scale up the search budget (e.g., more than 2 million in BFS-Prover (Xin et al., 2025)) for step-wise tactic generation, which seems extremely computational." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 69, + 395, + 526, + 489 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 395, + 526, + 489 + ], + "spans": [ + { + "bbox": [ + 69, + 395, + 526, + 489 + ], + "type": "text", + "content": "Although RL strategies have already proven their effectiveness in natural language for math problem solving, the performance in formal reasoning has been rather ordinary so far. We find that only Deepseek-Prover releases it RL version, and DeepSeek-Prover-v1.5-RL marginally improves " + }, + { + "bbox": [ + 69, + 395, + 526, + 489 + ], + "type": "inline_equation", + "content": "1.8\\%" + }, + { + "bbox": [ + 69, + 395, + 526, + 489 + ], + "type": "text", + "content": " than its supervised fine-tuned model. Compared to the success of reinforcement learning (RL) in natural language reasoning, the potential for improvement in formal language reasoning may still be vast. However, replicating the current successful RL training approaches, which primarily focus on the Qwen2.5 model series, is not straightforward." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 69, + 496, + 526, + 645 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 496, + 526, + 645 + ], + "spans": [ + { + "bbox": [ + 69, + 496, + 526, + 645 + ], + "type": "text", + "content": "To verify the posttraining scaling in ATP, we begin with the standard whole-proof generation models DeepSeek-Prover-v1.5-SFT and Goedel-Prover-SFT. There are three training stages in our optimization framework. We first collect public and synthetic data to continue training. 
We also utilize automatic synthetic CoT data to embed the self-reflection capabilities, such as backtracking (abandoning failing approaches) and verification (systematic error-checking), to the fine-tuned model. Next, we employ the GRPO algorithm (Shao et al., 2024) to perform reinforcement learning from proof assistant feedback (RLPAF) on the supervised fine-tuned model. Similar to DeepSeek-Prover-v1.5-RL, the verification results from the Lean compiler serve as reward supervision. After analyzing the validation results on benchmarks, we find our posttraining strategies can effectively boost the overall performance on MiniF2F (Zheng et al., 2021) benchmark." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 86, + 652, + 305, + 665 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 86, + 652, + 305, + 665 + ], + "spans": [ + { + "bbox": [ + 86, + 652, + 305, + 665 + ], + "type": "text", + "content": "In summary, here are our main contributions:" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 83, + 677, + 523, + 758 + ], + "type": "list", + "angle": 0, + "index": 8, + "blocks": [ + { + "bbox": [ + 83, + 677, + 523, + 730 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 83, + 677, + 523, + 730 + ], + "spans": [ + { + "bbox": [ + 83, + 677, + 523, + 730 + ], + "type": "text", + "content": "- We continue train current APT models with more high quality statement-proof data pairs. More importantly, we design synthetic data to enhance the models' self-reflection capabilities, enabling us to pilot cognitive behaviors in our models before applying the RL algorithm." 
+ } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 83, + 732, + 523, + 758 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 83, + 732, + 523, + 758 + ], + "spans": [ + { + "bbox": [ + 83, + 732, + 523, + 758 + ], + "type": "text", + "content": "- We investigate the RL training to boost the ATP prover that generates whole mathematical proofs in Lean 4 codes. During training, we employ the Lean 4 verifier to serve as a reward" + } + ] + } + ], + "index": 7 + } + ], + "sub_type": "text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 293, + 777, + 300, + 786 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 293, + 777, + 300, + 786 + ], + "spans": [ + { + "bbox": [ + 293, + 777, + 300, + 786 + ], + "type": "text", + "content": "2" + } + ] + } + ], + "index": 9 + } + ], + "page_size": [ + 595, + 841 + ], + "page_idx": 1 + }, + { + "para_blocks": [ + { + "bbox": [ + 95, + 86, + 157, + 98 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 95, + 86, + 157, + 98 + ], + "spans": [ + { + "bbox": [ + 95, + 86, + 157, + 98 + ], + "type": "text", + "content": "supervision." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 83, + 99, + 526, + 167 + ], + "type": "list", + "angle": 0, + "index": 3, + "blocks": [ + { + "bbox": [ + 83, + 99, + 525, + 126 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 83, + 99, + 525, + 126 + ], + "spans": [ + { + "bbox": [ + 83, + 99, + 525, + 126 + ], + "type": "text", + "content": "- The proposed Leanabelle-Prover achieves state-of-the-art performance through our meticulously designed strategy, " + }, + { + "bbox": [ + 83, + 99, + 525, + 126 + ], + "type": "inline_equation", + "content": "59.8\\%" + }, + { + "bbox": [ + 83, + 99, + 525, + 126 + ], + "type": "text", + "content": " (pass@32) on MiniF2F-test." 
+ } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 84, + 126, + 526, + 167 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 84, + 126, + 526, + 167 + ], + "spans": [ + { + "bbox": [ + 84, + 126, + 526, + 167 + ], + "type": "text", + "content": "- Currently, we collect around 1.52M formal statements, and 0.22M formal statements with detailed informal CoTs and verified proofs. All intermediate models and training data are released to the community1." + } + ] + } + ], + "index": 2 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 67, + 187, + 168, + 201 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 187, + 168, + 201 + ], + "spans": [ + { + "bbox": [ + 67, + 187, + 168, + 201 + ], + "type": "text", + "content": "2. Related Work" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 67, + 214, + 525, + 257 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 214, + 525, + 257 + ], + "spans": [ + { + "bbox": [ + 67, + 214, + 525, + 257 + ], + "type": "text", + "content": "**Lean4 Theorem Proving using LLMs.** With the rapid progress of LLMs, research has explored applying LLMs in FL reasoning to automate theorem proving. Prior research can be briefly classified into two strategies, namely proof-step generation and whole-proof generation." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 66, + 261, + 527, + 425 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 66, + 261, + 527, + 425 + ], + "spans": [ + { + "bbox": [ + 66, + 261, + 527, + 425 + ], + "type": "text", + "content": "Proof-step generation methods train an LLM agent to iteratively generate proof steps by predicting the next tactic based on the current proof state (Polu and Sutskever, 2020; Polu et al., 2022; Lample et al., 2022; Azerbayev et al., 2023; Yang et al., 2023; Lin et al., 2024; DeepMind, 2024; Trinh et al., 2024; Wu et al., 2024; Xin et al., 2024; Li et al., 2024; Xin et al., 2025). 
These methods apply an FL executor to verify after each step of generation and are able to discover some non-trivial proofs.
+ } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 66, + 587, + 525, + 724 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 66, + 587, + 525, + 724 + ], + "spans": [ + { + "bbox": [ + 66, + 587, + 525, + 724 + ], + "type": "text", + "content": "Reinforcement Learning for Lean4 Theorem Proving. There are two typical solutions to utilize RL for Lean4 Theorem Proving. In DeepSeek-Prover-v1.5-RL (Xin et al., 2024), the authors employ GRPO algorithm and takes the feedback signals from Lean 4 verifier as reward that reveals the proofs verified as correct or wrong. Such methods only uses the compilation feedback from the entire proof process as the reward result. In this paper, we employ the whole-proof generation approach, so we continue with this same solution. In contrast, Xin et al. (2025) use DPO (Rafailov et al., 2023) to refine the policy LLM by leveraging preference pairs naturally generated during tree search such as MCTS (Coulom, 2006). Therefore, the second solution utilizes the tactic state of each step during the compilation process. However, the effectiveness of existing methods still need improvement." 
+ } + ] + } + ], + "index": 8 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 83, + 746, + 340, + 758 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 83, + 746, + 340, + 758 + ], + "spans": [ + { + "bbox": [ + 83, + 746, + 340, + 758 + ], + "type": "text", + "content": "1https://github.com/Leanabell-LM/Leanabell-Prover" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 293, + 776, + 301, + 787 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 293, + 776, + 301, + 787 + ], + "spans": [ + { + "bbox": [ + 293, + 776, + 301, + 787 + ], + "type": "text", + "content": "3" + } + ] + } + ], + "index": 10 + } + ], + "page_size": [ + 595, + 841 + ], + "page_idx": 2 + }, + { + "para_blocks": [ + { + "bbox": [ + 66, + 85, + 528, + 195 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 66, + 85, + 528, + 195 + ], + "spans": [ + { + "bbox": [ + 66, + 85, + 528, + 195 + ], + "type": "text", + "content": "Cognitive Behaviors Gandhi et al. (2025) first reveal that models without integrating human-like reasoning abilities (e.g., verification, backtracking, subgoal setting and backward chaining) are hard to obtain significant improvements by directly applying RL algorithms. Similarly, Wan et al. (2025) propose that LLMs lack a specialized design for acquiring meta-thinking, resulting in low efficacy. Zeng et al. (2025) and Liu et al. (2025) propose that the training may directly start from the base models with such cognitive behaviors—a paradigm referred to as zero RL training. Considering the barely satisfactory performance of RL strategies in the formal reasoning field, we have reasons to suspect whether this is due to this fundamental reason." 
+ } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 67, + 214, + 180, + 230 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 214, + 180, + 230 + ], + "spans": [ + { + "bbox": [ + 67, + 214, + 180, + 230 + ], + "type": "text", + "content": "3. Model Training" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 67, + 243, + 189, + 257 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 243, + 189, + 257 + ], + "spans": [ + { + "bbox": [ + 67, + 243, + 189, + 257 + ], + "type": "text", + "content": "3.1. Continual Training" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 66, + 264, + 527, + 346 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 66, + 264, + 527, + 346 + ], + "spans": [ + { + "bbox": [ + 66, + 264, + 527, + 346 + ], + "type": "text", + "content": "Base Model. We begin with the previous whole-proof generation models DeepSeek-Prover v1.5-SFT (Xin et al., 2024) and Goedel-Prover (Lin et al., 2025) that are two well-trained versions after the supervised fine-tuning stage. Both of these two models are with 7 billion parameters. Specifically, the two models are trained with proofs added detailed explanatory informal comments. Therefore, the model possesses the basic ability to align natural language descriptions with Lean 4 codes." 
+ } + ] + } + ], + "index": 3 + }, + { + "type": "image", + "bbox": [ + 71, + 356, + 521, + 624 + ], + "blocks": [ + { + "bbox": [ + 71, + 356, + 521, + 624 + ], + "lines": [ + { + "bbox": [ + 71, + 356, + 521, + 624 + ], + "spans": [ + { + "bbox": [ + 71, + 356, + 521, + 624 + ], + "type": "image", + "image_path": "7359a344dc4b38409c46e9cce1237d8e32c0529d3ecee4ce0d6110ab088be7ee.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 67, + 634, + 527, + 663 + ], + "lines": [ + { + "bbox": [ + 67, + 634, + 527, + 663 + ], + "spans": [ + { + "bbox": [ + 67, + 634, + 527, + 663 + ], + "type": "text", + "content": "Figure 2 | Distributions of math domains in various Lean 4 dataset. Lean Workbook, Goedel-Prover, STP Lean and NuminaMath are training set. MiniF2F and ProofNet are test set." + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_caption" + } + ], + "index": 4 + }, + { + "bbox": [ + 67, + 693, + 527, + 735 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 693, + 527, + 735 + ], + "spans": [ + { + "bbox": [ + 67, + 693, + 527, + 735 + ], + "type": "text", + "content": "Statement Formalization. Similar to Lin et al. (2025), we train a formalizer, based on Qwen2.5-32B-Coder-Instruct (Hui et al., 2024), to convert the olympiad-level math problems in natural language into formal statements. 
We collect the formal and informal statement pairs sourced" + } + ] + } + ], + "index": 6 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 293, + 777, + 301, + 787 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 293, + 777, + 301, + 787 + ], + "spans": [ + { + "bbox": [ + 293, + 777, + 301, + 787 + ], + "type": "text", + "content": "4" + } + ] + } + ], + "index": 7 + } + ], + "page_size": [ + 595, + 841 + ], + "page_idx": 3 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 156, + 83, + 438, + 198 + ], + "blocks": [ + { + "bbox": [ + 156, + 83, + 438, + 198 + ], + "lines": [ + { + "bbox": [ + 156, + 83, + 438, + 198 + ], + "spans": [ + { + "bbox": [ + 156, + 83, + 438, + 198 + ], + "type": "table", + "html": "
Data SourceFSFS+IC+PF
Lean Workbook (Ying et al., 2024)140K40K
STP-Lean (Dong and Ma, 2025)400K36K
NuminaMath (Li et al., 2024)520K97K
AoPS (AoPS)370K26K
PromptCoT (Zhao et al., 2025)90K20K
Total1.52M0.22M
", + "image_path": "b7cc8430f6be44158dab40da3bc8875fd37fa636e1efd34a5b31f9efe81e607d.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "table_body" + } + ], + "index": 0 + }, + { + "bbox": [ + 67, + 206, + 525, + 248 + ], + "lines": [ + { + "bbox": [ + 67, + 206, + 525, + 248 + ], + "spans": [ + { + "bbox": [ + 67, + 206, + 525, + 248 + ], + "type": "text", + "content": "Table 1 | Distribution of our training sources, including released data by existing models and synthetic data from informal math problems. FS, PF and IC refer to formal statements, proofs and informal comments, respectively." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "text" + }, + { + "bbox": [ + 66, + 267, + 526, + 379 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 66, + 267, + 526, + 379 + ], + "spans": [ + { + "bbox": [ + 66, + 267, + 526, + 379 + ], + "type": "text", + "content": "from Goedel-Prover " + }, + { + "bbox": [ + 66, + 267, + 526, + 379 + ], + "type": "inline_equation", + "content": "^{2}" + }, + { + "bbox": [ + 66, + 267, + 526, + 379 + ], + "type": "text", + "content": " (around 30K). The training details of our formalizer are presented in Appendix A. With the trained formalizer, we convert the natural language statement and then verify with Lean 4 compiler. After these procedures, we finally get 520K formal statements from NuminaMath (Li et al., 2024) and 370K formal statements from AoPS (AoPS). Recently, Zhao et al. (2025) proposed to utilize LLMs to synthesize math problems through mathematical concepts. Based on the data analysis in Figure 2, we employ the PromptCoT framework to synthesize math problems at different levels, including AMC, AIME and USAMO, on three majority math domains (i.e., algebra, number theory and calculus)." 
+ } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 66, + 398, + 525, + 562 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 66, + 398, + 525, + 562 + ], + "spans": [ + { + "bbox": [ + 66, + 398, + 525, + 562 + ], + "type": "text", + "content": "Data Curation. We develop a comprehensive Lean 4 code completion dataset for the supervised fine-tuning, as shown in Table 1. These theorems are sourced from various projects, such as the standard Lean 4 math library Mathlib4 (mathlib4, 2025), Lean Workbook (Ying et al., 2024), synthetic theorems from Goedel-Prover (Lin et al., 2025) and STP (Dong and Ma, 2025). Besides the above-mentioned public data, we use deepseek-prover and goedel-prover to sample proofs that can be correctly verified by Lean 4 compiler. In this manner, we collect Lean data from NuminaMath and AoPS. Similarly, we formalize around 90K math problems synthesized through PromptCoT. In DeepSeek-Prover-v1.5 (Xin et al., 2024), the authors claim that incorporating natural language reasoning before generating theorem proof code can eliminate the gap between problem solving strategies in natural language and theorem proving in Lean. Thus, we also collect comments for part of the data. Finally, we collected around 1.52M formal statements and 0.22M statements with detailed informal comments and verified proofs." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 66, + 567, + 525, + 637 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 66, + 567, + 525, + 637 + ], + "spans": [ + { + "bbox": [ + 66, + 567, + 525, + 637 + ], + "type": "text", + "content": "As shown in Figure 2, we find that the data distributions of Lean Workbook, Goedel-Prover, STP Lean and NuminaMath cover well the MiniF2F test set. On the contrast, there is an obvious domain bias for ProofNet, which is also revealed by (Lin et al., 2025). It indicates a promising direction for further improvement by expanding data in specific mathematical domains. 
For fairness, we do not adjust the distributions of the training set in our optimizations." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 66, + 657, + 525, + 726 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 66, + 657, + 525, + 726 + ], + "spans": [ + { + "bbox": [ + 66, + 657, + 525, + 726 + ], + "type": "text", + "content": "Implementation After obtaining a large collection of formalized statements with proofs, we continuously train once. We use the lightweight framework SWIFT3 for the supervised fine-tuning (SFT). The SFT experiment is trained on 8 NVIDIA H100 GPUs with the following hyperparameters: a learning rate of " + }, + { + "bbox": [ + 66, + 657, + 525, + 726 + ], + "type": "inline_equation", + "content": "5 \\times 10^{-5}" + }, + { + "bbox": [ + 66, + 657, + 525, + 726 + ], + "type": "text", + "content": ", a global batch size of 32 over 2 epochs, and a weight decay coefficient of 0.1." + } + ] + } + ], + "index": 5 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 82, + 735, + 414, + 747 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 82, + 735, + 414, + 747 + ], + "spans": [ + { + "bbox": [ + 82, + 735, + 414, + 747 + ], + "type": "inline_equation", + "content": "^{2}" + }, + { + "bbox": [ + 82, + 735, + 414, + 747 + ], + "type": "text", + "content": "https://huggingface.co/datasets/Goedel-LM/Lean-workbook-proofs" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 83, + 747, + 287, + 758 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 83, + 747, + 287, + 758 + ], + "spans": [ + { + "bbox": [ + 83, + 747, + 287, + 758 + ], + "type": "text", + "content": "3https://github.com/modelscope/ms-swift" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 293, + 776, + 301, + 787 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 293, + 776, + 301, + 787 + ], + "spans": [ + { + "bbox": [ + 293, + 776, + 301, + 787 + ], + "type": "text", + 
"content": "5" + } + ] + } + ], + "index": 9 + } + ], + "page_size": [ + 595, + 841 + ], + "page_idx": 4 + }, + { + "para_blocks": [ + { + "bbox": [ + 67, + 85, + 254, + 100 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 85, + 254, + 100 + ], + "spans": [ + { + "bbox": [ + 67, + 85, + 254, + 100 + ], + "type": "text", + "content": "3.2. Integrating Cognitive Behaviors" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 67, + 107, + 526, + 163 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 107, + 526, + 163 + ], + "spans": [ + { + "bbox": [ + 67, + 107, + 526, + 163 + ], + "type": "text", + "content": "Gandhi et al. (2025) have revealed the importance of cognitive behaviors in base models before utilizing RL strategies. Following a similar idea, we induce reflection-like behaviors through specially designed interventions. We propose two kinds of CoT templates to integrate the self-reflection capabilities: Lean completion and rewriting." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 66, + 184, + 526, + 280 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 66, + 184, + 526, + 280 + ], + "spans": [ + { + "bbox": [ + 66, + 184, + 526, + 280 + ], + "type": "text", + "content": "Lean Completion. We sample 5K formal statements used in our previous continue training stage, which are not " + }, + { + "bbox": [ + 66, + 184, + 526, + 280 + ], + "type": "inline_equation", + "content": "100\\%" + }, + { + "bbox": [ + 66, + 184, + 526, + 280 + ], + "type": "text", + "content": " correct in the pass@16 sampling. We find the position where the first error tactic appears, and re-sample for completion. Once collecting new valid proofs, we can use the following prompt to ask Claude (Anthropic, 2025) to generate the reflection response. We observe that an effective reflection process can only be achieved by combining incorrect proofs, correct proofs, and tactic error messages. 
Thus, the feedback collected from Lean verifier is also used to create the prompt. An example is presented in Section B.1." + } + ] + } + ], + "index": 2 + }, + { + "type": "code", + "bbox": [ + 69, + 295, + 523, + 650 + ], + "blocks": [ + { + "bbox": [ + 69, + 295, + 523, + 650 + ], + "lines": [ + { + "bbox": [ + 69, + 295, + 523, + 650 + ], + "spans": [ + { + "bbox": [ + 69, + 295, + 523, + 650 + ], + "type": "text", + "content": "Lean Completion Prompt (Claude) \n# Initial Proof \n' " + }, + { + "bbox": [ + 69, + 295, + 523, + 650 + ], + "type": "inline_equation", + "content": "\\text{巧} ^ { \\prime }" + }, + { + "bbox": [ + 69, + 295, + 523, + 650 + ], + "type": "text", + "content": " lean4 \n{old_code} \n# Lean Feedback \n{error} \n# Correct Proof \n' " + }, + { + "bbox": [ + 69, + 295, + 523, + 650 + ], + "type": "inline_equation", + "content": "\\text{巧} ^ { \\prime }" + }, + { + "bbox": [ + 69, + 295, + 523, + 650 + ], + "type": "text", + "content": " lean4 \n{new_code} \nYour task is to generate a reflection of a Lean4 proof as follows: 1. You are provided with a lean proof code that failed to complete the proof, the verify feedback, and a revised correct proof. 2. You need to act as a verifier to check the code step by step and point out where the code fails with incorrect tactics. 3. Provide an alternative method, such as those in the correct proof. 4. Act as you are verifying your own proof.. Here are some rules you need to follow: 1. At the beginning, you should start with a conjunction phrase such as 'let's verify' and claim you need to verify the proof. 2. Instead of directly pointing out the issue, your answer should show the process to identify the incorrect step. 3. Do not refer to Lean Feedback, Correct Proof, or anything that shows you have already known the issue before your reflection. 4. Do not provide any new Lean4 code block, you don't need to write a correct proof. 5. Do not include a summary section. 6. 
Again, do not refer to Lean Feedback, Correct Proof, do not write anything like 'as shown in the correct proof'. Now, start with a conjunction phrase and require you need to check the proof, do not directly claim there is an issue." + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "code_body" + } + ], + "index": 3, + "sub_type": "code", + "guess_lang": "txt" + }, + { + "bbox": [ + 67, + 671, + 526, + 755 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 671, + 526, + 755 + ], + "spans": [ + { + "bbox": [ + 67, + 671, + 526, + 755 + ], + "type": "text", + "content": "Rewriting. Based on the above-mentioned Lean completion, there are two main steps in the rewriting strategy. First, we suspect that the generation of incorrect proofs is, to some extent, due to the incorrect problem-solving comments being generated. Therefore, we introduce Qwen2.5-72B-instruct (Team, 2024) to evaluate the problem-solving comments and then regenerate the correct problem-solving comments. Second, we provide Claude with both the invalid and newly rewritten valid Lean 4 code to generate comprehensive Chains of Thought (CoTs) that explain" + } + ] + } + ], + "index": 4 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 293, + 777, + 301, + 787 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 293, + 777, + 301, + 787 + ], + "spans": [ + { + "bbox": [ + 293, + 777, + 301, + 787 + ], + "type": "text", + "content": "6" + } + ] + } + ], + "index": 5 + } + ], + "page_size": [ + 595, + 841 + ], + "page_idx": 5 + }, + { + "para_blocks": [ + { + "bbox": [ + 67, + 85, + 524, + 112 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 85, + 524, + 112 + ], + "spans": [ + { + "bbox": [ + 67, + 85, + 524, + 112 + ], + "type": "text", + "content": "the reasoning process. In this manner, we collect 19K samples with CoTs (See the detailed examples in Appendix B.2). 
Here are the prompt templates for these two steps:" + } + ] + } + ], + "index": 0 + }, + { + "type": "code", + "bbox": [ + 77, + 142, + 509, + 200 + ], + "blocks": [ + { + "bbox": [ + 189, + 128, + 403, + 139 + ], + "lines": [ + { + "bbox": [ + 189, + 128, + 403, + 139 + ], + "spans": [ + { + "bbox": [ + 189, + 128, + 403, + 139 + ], + "type": "text", + "content": "Rewriting - Step 1 (Qwen2.5-72B-Instruct)" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "code_caption" + }, + { + "bbox": [ + 77, + 142, + 509, + 200 + ], + "lines": [ + { + "bbox": [ + 77, + 142, + 509, + 200 + ], + "spans": [ + { + "bbox": [ + 77, + 142, + 509, + 200 + ], + "type": "text", + "content": "You are an experienced mathematics evaluation teacher. You will be provided with a math problem and the corresponding solution idea.. \nPlease determine whether the solution idea is correct. If it is, please output \"Correct\", otherwise please output \"Incorrect\". If the solution idea is incorrect, please provide the correct solution idea, and the output of the solution idea should be included within \\*\\* and \\*\\*." + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "code_body" + } + ], + "index": 2, + "sub_type": "code", + "guess_lang": "txt" + }, + { + "type": "code", + "bbox": [ + 80, + 209, + 232, + 220 + ], + "blocks": [ + { + "bbox": [ + 80, + 209, + 232, + 220 + ], + "lines": [ + { + "bbox": [ + 80, + 209, + 232, + 220 + ], + "spans": [ + { + "bbox": [ + 80, + 209, + 232, + 220 + ], + "type": "text", + "content": "The output format is as follows:" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "code_body" + } + ], + "index": 3, + "sub_type": "code", + "guess_lang": "txt" + }, + { + "type": "code", + "bbox": [ + 80, + 227, + 352, + 238 + ], + "blocks": [ + { + "bbox": [ + 80, + 227, + 352, + 238 + ], + "lines": [ + { + "bbox": [ + 80, + 227, + 352, + 238 + ], + "spans": [ + { + "bbox": [ + 80, + 227, + 352, + 238 + ], + "type": "text", + "content": "1. 
Judgement: Incorrect. Solution: “‘‘Solution idea’’”" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "code_body" + } + ], + "index": 4, + "sub_type": "code", + "guess_lang": "txt" + }, + { + "type": "code", + "bbox": [ + 81, + 238, + 194, + 248 + ], + "blocks": [ + { + "bbox": [ + 81, + 238, + 194, + 248 + ], + "lines": [ + { + "bbox": [ + 81, + 238, + 194, + 248 + ], + "spans": [ + { + "bbox": [ + 81, + 238, + 194, + 248 + ], + "type": "text", + "content": "2. Judgement: Correct." + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "code_body" + } + ], + "index": 5, + "sub_type": "code", + "guess_lang": "txt" + }, + { + "type": "code", + "bbox": [ + 80, + 256, + 176, + 285 + ], + "blocks": [ + { + "bbox": [ + 80, + 256, + 176, + 285 + ], + "lines": [ + { + "bbox": [ + 80, + 256, + 176, + 285 + ], + "spans": [ + { + "bbox": [ + 80, + 256, + 176, + 285 + ], + "type": "text", + "content": "[math problem start] \n{problem} \n[math problem end]" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "code_body" + } + ], + "index": 7, + "sub_type": "code", + "guess_lang": "txt" + }, + { + "type": "code", + "bbox": [ + 80, + 294, + 180, + 323 + ], + "blocks": [ + { + "bbox": [ + 80, + 294, + 180, + 323 + ], + "lines": [ + { + "bbox": [ + 80, + 294, + 180, + 323 + ], + "spans": [ + { + "bbox": [ + 80, + 294, + 180, + 323 + ], + "type": "text", + "content": "[solution idea start] \n{solution} \n[solution idea end]" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "code_body" + } + ], + "index": 8, + "sub_type": "code", + "guess_lang": "txt" + }, + { + "bbox": [ + 67, + 353, + 526, + 393 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 353, + 526, + 393 + ], + "spans": [ + { + "bbox": [ + 67, + 353, + 526, + 393 + ], + "type": "text", + "content": "With these synthesized data, we employ our second-stage continual training, with a learning rate of " + }, + { + "bbox": [ + 67, + 353, + 526, + 393 + ], + "type": "inline_equation", + 
"content": "5 \\times 10^{-5}" + }, + { + "bbox": [ + 67, + 353, + 526, + 393 + ], + "type": "text", + "content": " and overall batch size of 1024 for one epoch. Finally, we obtain the model, named as Leanabell-Prover-SFT." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 67, + 412, + 214, + 426 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 412, + 214, + 426 + ], + "spans": [ + { + "bbox": [ + 67, + 412, + 214, + 426 + ], + "type": "text", + "content": "3.3. Reinforcement Learning" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 67, + 434, + 525, + 502 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 434, + 525, + 502 + ], + "spans": [ + { + "bbox": [ + 67, + 434, + 525, + 502 + ], + "type": "text", + "content": "We integrate reinforcement learning (RL) with the Lean 4 theorem prover to automate the discovery of valid proofs. The RL agent interacts with the Lean 4 environment, generating whole proofs and receiving feedback from Lean 4 compiler as reward signals. The agent's objective is to maximize cumulative rewards by learning to generate syntactically correct, logically valid proofs for an input formal statement." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 67, + 523, + 526, + 605 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 523, + 526, + 605 + ], + "spans": [ + { + "bbox": [ + 67, + 523, + 526, + 605 + ], + "type": "text", + "content": "Policy Optimization Algorithms. We employ the recent GRPO (Shao et al., 2024) as our RL algorithm. 
For each input formal statement " + }, + { + "bbox": [ + 67, + 523, + 526, + 605 + ], + "type": "inline_equation", + "content": "q" + }, + { + "bbox": [ + 67, + 523, + 526, + 605 + ], + "type": "text", + "content": ", GRPO samples a group of outputs " + }, + { + "bbox": [ + 67, + 523, + 526, + 605 + ], + "type": "inline_equation", + "content": "\\{o_1, o_2, \\dots, o_G\\}" + }, + { + "bbox": [ + 67, + 523, + 526, + 605 + ], + "type": "text", + "content": " from the old policy " + }, + { + "bbox": [ + 67, + 523, + 526, + 605 + ], + "type": "inline_equation", + "content": "\\pi_{\\theta_{old}}" + }, + { + "bbox": [ + 67, + 523, + 526, + 605 + ], + "type": "text", + "content": " and then collect the feedback " + }, + { + "bbox": [ + 67, + 523, + 526, + 605 + ], + "type": "inline_equation", + "content": "\\{\\tau_1, \\tau_2, \\dots, \\tau_G\\}" + }, + { + "bbox": [ + 67, + 523, + 526, + 605 + ], + "type": "text", + "content": " for the group of responses through Lean 4 compiler. According to each feedback status " + }, + { + "bbox": [ + 67, + 523, + 526, + 605 + ], + "type": "inline_equation", + "content": "\\tau_i" + }, + { + "bbox": [ + 67, + 523, + 526, + 605 + ], + "type": "text", + "content": ", we assign a particular reward. 
Then, the advantage of the " + }, + { + "bbox": [ + 67, + 523, + 526, + 605 + ], + "type": "inline_equation", + "content": "i" + }, + { + "bbox": [ + 67, + 523, + 526, + 605 + ], + "type": "text", + "content": "-th output is calculated by normalizing the group-level rewards " + }, + { + "bbox": [ + 67, + 523, + 526, + 605 + ], + "type": "inline_equation", + "content": "\\{R_1, R_2, \\dots, R_G\\}" + }, + { + "bbox": [ + 67, + 523, + 526, + 605 + ], + "type": "text", + "content": ":" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 236, + 603, + 525, + 635 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 236, + 603, + 525, + 635 + ], + "spans": [ + { + "bbox": [ + 236, + 603, + 525, + 635 + ], + "type": "interline_equation", + "content": "\\hat {A} _ {i, t} = \\frac {r _ {i} - \\operatorname {m e a n} \\left(\\left\\{R _ {i} \\right\\} _ {i = 1} ^ {G}\\right)}{\\operatorname {s t d} \\left(\\left\\{R _ {i} \\right\\} _ {i = 1} ^ {G}\\right)}. \\tag {1}", + "image_path": "f5772f681c658e0fdb28bb62d8059253c173aeed7968ce49052268e5a2e696b7.jpg" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 67, + 639, + 461, + 653 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 639, + 461, + 653 + ], + "spans": [ + { + "bbox": [ + 67, + 639, + 461, + 653 + ], + "type": "text", + "content": "Finally, we optimizes the policy model " + }, + { + "bbox": [ + 67, + 639, + 461, + 653 + ], + "type": "inline_equation", + "content": "\\pi_{\\theta}" + }, + { + "bbox": [ + 67, + 639, + 461, + 653 + ], + "type": "text", + "content": " by maximizing the following objective:" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 129, + 661, + 525, + 718 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 129, + 661, + 525, + 718 + ], + "spans": [ + { + "bbox": [ + 129, + 661, + 525, + 718 + ], + "type": "interline_equation", + "content": "\\begin{array}{l} \\mathcal {J} _ {\\mathrm {G R P O}} (\\theta) 
= \\mathbb {E} _ {(q, a) \\sim \\mathcal {D}, \\{o _ {i} \\} _ {i = 1} ^ {G} \\sim \\pi_ {\\theta_ {\\mathrm {o l d}}} (\\cdot | q)} \\\\ \\left. \\right.\\left[ \\frac {1}{G} \\sum_ {i = 1} ^ {G} \\frac {1}{| o _ {i} |} \\sum_ {t = 1} ^ {| o _ {i} |} \\left(\\min \\left(r _ {i, t} (\\theta) \\hat {A} _ {i, t}, \\operatorname {c l i p} \\left(r _ {i, t} (\\theta), 1 - \\varepsilon , 1 + \\varepsilon\\right) \\hat {A} _ {i, t}\\right)\\right)\\right], \\tag {2} \\\\ \\end{array}", + "image_path": "35f27ac621c7756bfb48ee9ba8225e75a714436d403dd6520c877e628634c3f7.jpg" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 67, + 724, + 101, + 735 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 724, + 101, + 735 + ], + "spans": [ + { + "bbox": [ + 67, + 724, + 101, + 735 + ], + "type": "text", + "content": "where" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 232, + 732, + 525, + 762 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 232, + 732, + 525, + 762 + ], + "spans": [ + { + "bbox": [ + 232, + 732, + 525, + 762 + ], + "type": "interline_equation", + "content": "r _ {i, t} (\\theta) = \\frac {\\pi_ {\\theta} \\left(o _ {i , t} \\mid q , o _ {i , < t}\\right)}{\\pi_ {\\theta_ {\\mathrm {o l d}}} \\left(o _ {i , t} \\mid q , o _ {i , < t}\\right)}, \\tag {3}", + "image_path": "dfe665c5c33438b54706ae078adfaee87f1757e2977bb0cecd6b6df134887d2b.jpg" + } + ] + } + ], + "index": 17 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 293, + 777, + 300, + 786 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 293, + 777, + 300, + 786 + ], + "spans": [ + { + "bbox": [ + 293, + 777, + 300, + 786 + ], + "type": "text", + "content": "7" + } + ] + } + ], + "index": 18 + } + ], + "page_size": [ + 595, + 841 + ], + "page_idx": 6 + }, + { + "para_blocks": [ + { + "bbox": [ + 67, + 85, + 528, + 113 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 85, + 528, + 113 + ], + 
"spans": [ + { + "bbox": [ + 67, + 85, + 528, + 113 + ], + "type": "inline_equation", + "content": "\\varepsilon" + }, + { + "bbox": [ + 67, + 85, + 528, + 113 + ], + "type": "text", + "content": " is a hyperparameter. In our experiments, we set " + }, + { + "bbox": [ + 67, + 85, + 528, + 113 + ], + "type": "inline_equation", + "content": "\\varepsilon = 0.2" + }, + { + "bbox": [ + 67, + 85, + 528, + 113 + ], + "type": "text", + "content": ". Notably, we do not use the Kullback-Leibler (KL) divergence penalty." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 225, + 130, + 368, + 142 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 225, + 130, + 368, + 142 + ], + "spans": [ + { + "bbox": [ + 225, + 130, + 368, + 142 + ], + "type": "text", + "content": "Rewriting - Step 2 (Claude)" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 80, + 146, + 139, + 154 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 80, + 146, + 139, + 154 + ], + "spans": [ + { + "bbox": [ + 80, + 146, + 139, + 154 + ], + "type": "text", + "content": "Wrong code" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 80, + 155, + 120, + 163 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 80, + 155, + 120, + 163 + ], + "spans": [ + { + "bbox": [ + 80, + 155, + 120, + 163 + ], + "type": "text", + "content": "\" ' lean4" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 81, + 165, + 129, + 174 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 81, + 165, + 129, + 174 + ], + "spans": [ + { + "bbox": [ + 81, + 165, + 129, + 174 + ], + "type": "text", + "content": "lean code1" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 81, + 174, + 96, + 181 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 81, + 174, + 96, + 181 + ], + "spans": [ + { + "bbox": [ + 81, + 174, + 96, + 181 + ], + "type": "text", + "content": "," + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 80, + 193, + 149, + 201 + ], + 
"type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 80, + 193, + 149, + 201 + ], + "spans": [ + { + "bbox": [ + 80, + 193, + 149, + 201 + ], + "type": "text", + "content": "Correct code" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 81, + 202, + 120, + 211 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 81, + 202, + 120, + 211 + ], + "spans": [ + { + "bbox": [ + 81, + 202, + 120, + 211 + ], + "type": "text", + "content": "\" ' lean4" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 81, + 212, + 130, + 221 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 81, + 212, + 130, + 221 + ], + "spans": [ + { + "bbox": [ + 81, + 212, + 130, + 221 + ], + "type": "text", + "content": "lean code2" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 81, + 222, + 96, + 227 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 81, + 222, + 96, + 227 + ], + "spans": [ + { + "bbox": [ + 81, + 222, + 96, + 227 + ], + "type": "text", + "content": "," + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 80, + 240, + 489, + 260 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 80, + 240, + 489, + 260 + ], + "spans": [ + { + "bbox": [ + 80, + 240, + 489, + 260 + ], + "type": "text", + "content": "I have given you with two Lean4 code solutions to the same problem. The first solution fails to compile in Lean4, while the second solution compiles successfully." 
+ } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 81, + 260, + 157, + 269 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 81, + 260, + 157, + 269 + ], + "spans": [ + { + "bbox": [ + 81, + 260, + 157, + 269 + ], + "type": "text", + "content": "Your task is to:" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 81, + 269, + 493, + 316 + ], + "type": "list", + "angle": 0, + "index": 15, + "blocks": [ + { + "bbox": [ + 81, + 269, + 464, + 279 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 81, + 269, + 464, + 279 + ], + "spans": [ + { + "bbox": [ + 81, + 269, + 464, + 279 + ], + "type": "text", + "content": "1. Act as a verification assistant and carefully compare these two code snippets." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 81, + 279, + 488, + 296 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 81, + 279, + 488, + 296 + ], + "spans": [ + { + "bbox": [ + 81, + 279, + 488, + 296 + ], + "type": "text", + "content": "2. Identify the specific errors or flawed strategies in the first solution that caused compilation failure." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 81, + 297, + 493, + 316 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 81, + 297, + 493, + 316 + ], + "spans": [ + { + "bbox": [ + 81, + 297, + 493, + 316 + ], + "type": "text", + "content": "3. Explain the reasoning process that would lead someone from the incorrect approach to the correct solution." + } + ] + } + ], + "index": 14 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 80, + 325, + 511, + 364 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 80, + 325, + 511, + 364 + ], + "spans": [ + { + "bbox": [ + 80, + 325, + 511, + 364 + ], + "type": "text", + "content": "When analyzing the code, please simulate the thought process of someone examining their own proof. 
Begin sections of your analysis with phrases like \"Let's verify my proof...\", \"Wait, I see an issue here...\", or \"Let me reconsider this approach...\" This should demonstrate how someone might catch and correct their own mistakes." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 80, + 372, + 498, + 402 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 80, + 372, + 498, + 402 + ], + "spans": [ + { + "bbox": [ + 80, + 372, + 498, + 402 + ], + "type": "text", + "content": "The analysis emphasizes conceptual understanding over syntax details, explaining the fundamental logical or strategic errors in the initial solution and demonstrating how the corrected solution properly addresses these conceptual problems." + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 80, + 411, + 250, + 420 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 80, + 411, + 250, + 420 + ], + "spans": [ + { + "bbox": [ + 80, + 411, + 250, + 420 + ], + "type": "text", + "content": "Please structure your response with:" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 80, + 421, + 504, + 457 + ], + "type": "list", + "angle": 0, + "index": 22, + "blocks": [ + { + "bbox": [ + 80, + 421, + 349, + 429 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 80, + 421, + 349, + 429 + ], + "spans": [ + { + "bbox": [ + 80, + 421, + 349, + 429 + ], + "type": "text", + "content": "- Identification of specific errors in the first solution." + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 80, + 430, + 377, + 439 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 80, + 430, + 377, + 439 + ], + "spans": [ + { + "bbox": [ + 80, + 430, + 377, + 439 + ], + "type": "text", + "content": "- Explanation of the conceptual issues that led to these errors." 
+ } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 80, + 439, + 504, + 457 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 80, + 439, + 504, + 457 + ], + "spans": [ + { + "bbox": [ + 80, + 439, + 504, + 457 + ], + "type": "text", + "content": "- How to fix the conceptual problems in error so as to generate the problem-solving idea of the second solution?" + } + ] + } + ], + "index": 21 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 80, + 466, + 508, + 553 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 80, + 466, + 508, + 553 + ], + "spans": [ + { + "bbox": [ + 80, + 466, + 508, + 553 + ], + "type": "text", + "content": "Do not provide any new Lean4 code beyond what I've given you - focus exclusively on analyzing the provided code. Don't include the phased titles in the output results, such as \"Identification of Specific Errors in the First Solution\", \"Conceptual Issues That Led to These Errors\", etc. Also, don't use expressions like \"the first solution\" or \"the second solution\". Use \"current solution\" to represent \"first solution\". Although you used the second solution for auxiliary analysis, avoid revealing in your response that you've seen its content. For example, refrain from saying things like 'I noticed that in the new solution.' Instead, respond as if you're thinking independently, based solely on the first solution." + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 67, + 579, + 526, + 700 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 579, + 526, + 700 + ], + "spans": [ + { + "bbox": [ + 67, + 579, + 526, + 700 + ], + "type": "text", + "content": "Reward Function. Unlike stepwise rewards, the feedback is provided only after the full proof is compiled by Lean 4 verifier in our experiments. 
Our rewards are derived from: (1) Terminal reward " + }, + { + "bbox": [ + 67, + 579, + 526, + 700 + ], + "type": "inline_equation", + "content": "R_{\\text{success}}" + }, + { + "bbox": [ + 67, + 579, + 526, + 700 + ], + "type": "text", + "content": ": a scalar reward granted if the entire proof is validated successfully by Lean 4 verifier. (2) Penalty " + }, + { + "bbox": [ + 67, + 579, + 526, + 700 + ], + "type": "inline_equation", + "content": "R_{\\text{fail}}" + }, + { + "bbox": [ + 67, + 579, + 526, + 700 + ], + "type": "text", + "content": ": a negative reward for proofs with critical errors (e.g., type mismatches, infinite loops, unsolved goals and etc). Moreover, we observe that there are warnings in the feedback, such as some unnecessary or redundant steps have no negative effects on the final validation. In our experiments, we ignore warning cases as long as the compilation and verification process passes successfully. So, given the feedback " + }, + { + "bbox": [ + 67, + 579, + 526, + 700 + ], + "type": "inline_equation", + "content": "\\tau" + }, + { + "bbox": [ + 67, + 579, + 526, + 700 + ], + "type": "text", + "content": " from Lean 4 compiler, our final reward function can be formulated as:" + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 168, + 708, + 525, + 744 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 168, + 708, + 525, + 744 + ], + "spans": [ + { + "bbox": [ + 168, + 708, + 525, + 744 + ], + "type": "interline_equation", + "content": "R (\\tau) = \\left\\{ \\begin{array}{l l} R _ {\\text {s u c c e s s}} & \\text {i f L e a n 4 f u l l y v a l i d a t e s} \\tau \\\\ R _ {\\text {f a i l}} & \\text {o t h e r w i s e (s y n t a x e r r o r s / t i m e o u t)} \\end{array} \\right. 
\\tag {4}", + "image_path": "24634dc2af6e2b833e2bb966dcb1e44f788c6180dc74eaacb30af951688f8d44.jpg" + } + ] + } + ], + "index": 25 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 293, + 777, + 301, + 787 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 293, + 777, + 301, + 787 + ], + "spans": [ + { + "bbox": [ + 293, + 777, + 301, + 787 + ], + "type": "text", + "content": "8" + } + ] + } + ], + "index": 26 + } + ], + "page_size": [ + 595, + 841 + ], + "page_idx": 7 + }, + { + "para_blocks": [ + { + "bbox": [ + 69, + 84, + 527, + 193 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 84, + 527, + 193 + ], + "spans": [ + { + "bbox": [ + 69, + 84, + 527, + 193 + ], + "type": "text", + "content": "Implementation We conduct RL training based on the Leanabell-Prover-SFT. We use a constant learning rate of " + }, + { + "bbox": [ + 69, + 84, + 527, + 193 + ], + "type": "inline_equation", + "content": "1 \\times 10^{-6}" + }, + { + "bbox": [ + 69, + 84, + 527, + 193 + ], + "type": "text", + "content": ". For each theorem, we sample a group of 32 candidate proofs, with maximal rollout length set to 8192. The training global batch size is set to " + }, + { + "bbox": [ + 69, + 84, + 527, + 193 + ], + "type": "inline_equation", + "content": "32 \\times 32 = 1024" + }, + { + "bbox": [ + 69, + 84, + 527, + 193 + ], + "type": "text", + "content": ". On the RL training data, we select samples from those whose number of validations in Pass@32 falls within the range of [2, 16]. We believe this subset of data has a certain level of difficulty while providing exploration space, making it effective for updating the policy model. The detailed distribution of pass@32 is presented in Figure 6 in Appendix C. Finally, we obtain the RL version model named as Leanabell-Prover-RL." 
+ } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 69, + 214, + 162, + 230 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 214, + 162, + 230 + ], + "spans": [ + { + "bbox": [ + 69, + 214, + 162, + 230 + ], + "type": "text", + "content": "4. Experiments" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 69, + 240, + 527, + 281 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 240, + 527, + 281 + ], + "spans": [ + { + "bbox": [ + 69, + 240, + 527, + 281 + ], + "type": "text", + "content": "Benchmarks We follow the previous work (Lin et al., 2025; Xin et al., 2024; Wang et al., 2024) and mainly validate the effectiveness of our proposed method on the most commonly-used MiniF2F-test (Zheng et al., 2021)." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 69, + 303, + 527, + 521 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 303, + 527, + 521 + ], + "spans": [ + { + "bbox": [ + 69, + 303, + 527, + 521 + ], + "type": "text", + "content": "Results on MiniF2F. We have two versions of our models posttrained from two strong prover models: Deepseek-Prover-v1.5-SFT and Goedel-Prover-SFT, namely Leanabell-Prover-DS and Leanabell-Prover-GD. We mainly compare current whole proof generation methods, while ignore those with proof-step methods using far more inference-compute. As shown in Table 2, our posttraining framework boosts both DeepSeek-Prover-v1.5-SFT and Goedel-Prover-SFT models. On the minimum sample budget, Leanabell-Prover-GD-RL achieves the SOTA of MiniF2F (59.8% on pass@32), which gains up to 2.2% (i.e. from Goedel-Prover SFT, from 57.6% to 59.8%). We can observe that following our continual training phase, our method (Leanabell-Prover-DS-SFT) shows improvement over its base model (DeepSeek-Prover-v1.5-SFT), and the RL version continues to effectively enhance its performance. Meanwhile, Leanabell-Prover-GD-SFT performs almost identically to Leanabell-Prover-DS-SFT. 
This is reasonable, as Goedel-Prover-SFT is finetuned from DeepSeek-Pover-v1.5-base, with a significantly larger amount of data compared to our continual training stage. Therefore, our continual training on Leanabell-Prover-GD-SFT primarily adjusts the model's reasoning ability across different math domain distributions and incorporates the proper CoT format with cognitive behaviors into the current training data. This makes the checkpoint more conducive to RL training." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 69, + 526, + 527, + 649 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 526, + 527, + 649 + ], + "spans": [ + { + "bbox": [ + 69, + 526, + 527, + 649 + ], + "type": "text", + "content": "We also increase the sampling budget to compare the performance gains. For the SFT models, as can be seen, the baseline DeepSeek-Prover-v1.5-SFT achieves around " + }, + { + "bbox": [ + 69, + 526, + 527, + 649 + ], + "type": "inline_equation", + "content": "2.2\\%" + }, + { + "bbox": [ + 69, + 526, + 527, + 649 + ], + "type": "text", + "content": " performance gains (i.e., " + }, + { + "bbox": [ + 69, + 526, + 527, + 649 + ], + "type": "inline_equation", + "content": "48.2\\%" + }, + { + "bbox": [ + 69, + 526, + 527, + 649 + ], + "type": "text", + "content": " to " + }, + { + "bbox": [ + 69, + 526, + 527, + 649 + ], + "type": "inline_equation", + "content": "50.4\\%" + }, + { + "bbox": [ + 69, + 526, + 527, + 649 + ], + "type": "text", + "content": "), as increasing sampling budget from 32 to 128. 
Within our configurations, our Leanabell-Prover-DS-SFT and Leanabell-Prover-GD-SFT models also achieve " + }, + { + "bbox": [ + 69, + 526, + 527, + 649 + ], + "type": "inline_equation", + "content": "1.8\\%" + }, + { + "bbox": [ + 69, + 526, + 527, + 649 + ], + "type": "text", + "content": " (i.e., " + }, + { + "bbox": [ + 69, + 526, + 527, + 649 + ], + "type": "inline_equation", + "content": "54.9\\%" + }, + { + "bbox": [ + 69, + 526, + 527, + 649 + ], + "type": "text", + "content": " to " + }, + { + "bbox": [ + 69, + 526, + 527, + 649 + ], + "type": "inline_equation", + "content": "56.7\\%" + }, + { + "bbox": [ + 69, + 526, + 527, + 649 + ], + "type": "text", + "content": ") and " + }, + { + "bbox": [ + 69, + 526, + 527, + 649 + ], + "type": "inline_equation", + "content": "1.2\\%" + }, + { + "bbox": [ + 69, + 526, + 527, + 649 + ], + "type": "text", + "content": " (i.e., " + }, + { + "bbox": [ + 69, + 526, + 527, + 649 + ], + "type": "inline_equation", + "content": "58.2\\%" + }, + { + "bbox": [ + 69, + 526, + 527, + 649 + ], + "type": "text", + "content": " to " + }, + { + "bbox": [ + 69, + 526, + 527, + 649 + ], + "type": "inline_equation", + "content": "59.4\\%" + }, + { + "bbox": [ + 69, + 526, + 527, + 649 + ], + "type": "text", + "content": ") performance gains on the same inference scaling experiments, respectively. 
For the RL models, DeepSeek-Prover-v1.5-RL achieves " + }, + { + "bbox": [ + 69, + 526, + 527, + 649 + ], + "type": "inline_equation", + "content": "1.6\\%" + }, + { + "bbox": [ + 69, + 526, + 527, + 649 + ], + "type": "text", + "content": " performance gains (i.e., " + }, + { + "bbox": [ + 69, + 526, + 527, + 649 + ], + "type": "inline_equation", + "content": "50.0\\%" + }, + { + "bbox": [ + 69, + 526, + 527, + 649 + ], + "type": "text", + "content": " to " + }, + { + "bbox": [ + 69, + 526, + 527, + 649 + ], + "type": "inline_equation", + "content": "51.6\\%" + }, + { + "bbox": [ + 69, + 526, + 527, + 649 + ], + "type": "text", + "content": "), while our Leanabell-Prover-DS-RL achieves more gains (i.e., " + }, + { + "bbox": [ + 69, + 526, + 527, + 649 + ], + "type": "inline_equation", + "content": "56.6\\%" + }, + { + "bbox": [ + 69, + 526, + 527, + 649 + ], + "type": "text", + "content": " to " + }, + { + "bbox": [ + 69, + 526, + 527, + 649 + ], + "type": "inline_equation", + "content": "59.0\\%" + }, + { + "bbox": [ + 69, + 526, + 527, + 649 + ], + "type": "text", + "content": "). Therefore, after the model has undergone SFT and RL training, our models still maintain the exploration capabilities." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 69, + 670, + 527, + 752 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 670, + 527, + 752 + ], + "spans": [ + { + "bbox": [ + 69, + 670, + 527, + 752 + ], + "type": "text", + "content": "Exploration Abilities and RL. We first examine our two SFT models, with their pass@16 accuracy at different sampling temperatures. This metric can serve as an indicator of the policy's exploration ability and is particularly relevant for RL, as it reflects the policy's ability to generate responses that can achieve a positive reward. As shown in Figure 3, we find both SFT models are exploratory, and thus ready for RL. The RL training rewards are shown in Figure 4. 
During our experiments, we also compared the original GRPO with Dr. GRPO (Liu et al., 2025), and" + } + ] + } + ], + "index": 5 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 293, + 777, + 300, + 786 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 293, + 777, + 300, + 786 + ], + "spans": [ + { + "bbox": [ + 293, + 777, + 300, + 786 + ], + "type": "text", + "content": "9" + } + ] + } + ], + "index": 6 + } + ], + "page_size": [ + 595, + 841 + ], + "page_idx": 8 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 120, + 169, + 474, + 596 + ], + "blocks": [ + { + "bbox": [ + 120, + 169, + 474, + 596 + ], + "lines": [ + { + "bbox": [ + 120, + 169, + 474, + 596 + ], + "spans": [ + { + "bbox": [ + 120, + 169, + 474, + 596 + ], + "type": "table", + "html": "
MethodSample budgetminiF2F-test
TheoremLlama [28]12833.6%
DeepSeek-Prover-v1 [32]12846.1% ± 0.5%
DeepSeek-Prover-v1.5-Base [33]12829.7% ± 0.5%
320039.2%
640042.2%
DeepSeek-Prover-v1.5-SFT [33]3248.2% ± 0.6%
6449.6% ± 0.7%
12850.4% ± 0.4%
320053.3% ± 0.5%
DeepSeek-Prover-v1.5-RL [33]3250.0% ± 0.5%
6450.7% ± 0.4%
12851.6% ± 0.5%
320054.9% ± 0.7%
STP [7]12857.7% ± 0.6%
320061.7% ± 0.6%
Goedel-Prover-SFT [15]3257.6% ± 0.7%
320062.7%
Leanabell-Prover-DS-SFT3254.9%
6455.3%
12856.7%
Leanabell-Prover-DS-RL3256.6%
6457.4%
12859.0%
Leanabell-Prover-GD-SFT3258.2%
6459.0%
12859.4%
Leanabell-Prover-GD-RL3259.8%
6460.7%
12861.1%
", + "image_path": "2fc90dfe1c7eb7a35b8a8ef67fc0f69ff7ff4653848471c031d9155572184687.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "table_body" + } + ], + "index": 0 + }, + { + "bbox": [ + 67, + 603, + 526, + 660 + ], + "lines": [ + { + "bbox": [ + 67, + 603, + 526, + 660 + ], + "spans": [ + { + "bbox": [ + 67, + 603, + 526, + 660 + ], + "type": "text", + "content": "Table 2 | Comparison with state-of-the-art methods on the miniF2F-test dataset. The notation " + }, + { + "bbox": [ + 67, + 603, + 526, + 660 + ], + "type": "inline_equation", + "content": "\\mu \\pm \\sigma" + }, + { + "bbox": [ + 67, + 603, + 526, + 660 + ], + "type": "text", + "content": " denotes the average accuracy " + }, + { + "bbox": [ + 67, + 603, + 526, + 660 + ], + "type": "inline_equation", + "content": "\\mu" + }, + { + "bbox": [ + 67, + 603, + 526, + 660 + ], + "type": "text", + "content": " and the standard deviation " + }, + { + "bbox": [ + 67, + 603, + 526, + 660 + ], + "type": "inline_equation", + "content": "\\sigma" + }, + { + "bbox": [ + 67, + 603, + 526, + 660 + ], + "type": "text", + "content": ". \"DS\" and \"GD\" refer to using the DeepSeek-Prover-v1.5-SFT and Goedel-Prover-SFT as base models to continue SFT and RL training, respectively." 
+ } + ] + } + ], + "index": 1, + "angle": 0, + "type": "text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 290, + 777, + 304, + 787 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 290, + 777, + 304, + 787 + ], + "spans": [ + { + "bbox": [ + 290, + 777, + 304, + 787 + ], + "type": "text", + "content": "10" + } + ] + } + ], + "index": 2 + } + ], + "page_size": [ + 595, + 841 + ], + "page_idx": 9 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 184, + 86, + 409, + 284 + ], + "blocks": [ + { + "bbox": [ + 184, + 86, + 409, + 284 + ], + "lines": [ + { + "bbox": [ + 184, + 86, + 409, + 284 + ], + "spans": [ + { + "bbox": [ + 184, + 86, + 409, + 284 + ], + "type": "image", + "image_path": "01687d0709017cc46d9129128904e3099ce96b65ce6ffb05bc736ea378baf0ac.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 67, + 295, + 455, + 311 + ], + "lines": [ + { + "bbox": [ + 67, + 295, + 455, + 311 + ], + "spans": [ + { + "bbox": [ + 67, + 295, + 455, + 311 + ], + "type": "text", + "content": "Figure 3 | Exploration ability: pass@16 measures how well base models explore." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "type": "image", + "bbox": [ + 71, + 321, + 286, + 496 + ], + "blocks": [ + { + "bbox": [ + 71, + 321, + 286, + 496 + ], + "lines": [ + { + "bbox": [ + 71, + 321, + 286, + 496 + ], + "spans": [ + { + "bbox": [ + 71, + 321, + 286, + 496 + ], + "type": "image", + "image_path": "7f4f27568a48a2be4320d9e2cbf50395a8c162d8e7903c6e2cea1fd88a15c8ad.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 67, + 507, + 525, + 535 + ], + "lines": [ + { + "bbox": [ + 67, + 507, + 525, + 535 + ], + "spans": [ + { + "bbox": [ + 67, + 507, + 525, + 535 + ], + "type": "text", + "content": "Figure 4 | Left: Reward curve during training Leanabell-Prover-Prover-DS-RL. 
Right: Reward curve during training Leanabell-Prover-Prover-GD-RL." + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_caption" + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 308, + 321, + 521, + 496 + ], + "blocks": [ + { + "bbox": [ + 308, + 321, + 521, + 496 + ], + "lines": [ + { + "bbox": [ + 308, + 321, + 521, + 496 + ], + "spans": [ + { + "bbox": [ + 308, + 321, + 521, + 496 + ], + "type": "image", + "image_path": "33a464e26cdd80f93c82ec770c3dc8cb34fc16b34875ff02895ac22cb7f8d7e9.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + } + ], + "index": 3 + }, + { + "bbox": [ + 67, + 555, + 526, + 719 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 555, + 526, + 719 + ], + "spans": [ + { + "bbox": [ + 67, + 555, + 526, + 719 + ], + "type": "text", + "content": "found that the training dynamics remained largely consistent under these two RL training algorithms. This may be attributed to the fact that the length of different rollout responses, regardless of whether they are correct or incorrect, does not vary significantly in formal language reasoning. We have also observed that selecting an appropriate prompt set is crucial for RL training. Merely using pass@N as the sole criterion is insufficient to unlock the full potential of RL. As shown in Figure 5, we analyze the distributions of error problems across different source types in the MiniF2F-test set. We observed that, based on DeepSeek-Prover-v1.5-SFT, errors can be reduced across all data source types in MiniF2F-test set, especially for AMC, MATH, and CUSTOM. However, this improvement is significantly reduced in optimization results based on Goedel-Prover-SFT. This suggests that such as the intrinsic difficulty level of the statements (e.g., whether they are at the AIME or IMO level), the coverage of mathematical domains, and the balance with the prover model's capabilities, are also important." 
+ } + ] + } + ], + "index": 5 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 290, + 776, + 302, + 787 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 290, + 776, + 302, + 787 + ], + "spans": [ + { + "bbox": [ + 290, + 776, + 302, + 787 + ], + "type": "text", + "content": "11" + } + ] + } + ], + "index": 6 + } + ], + "page_size": [ + 595, + 841 + ], + "page_idx": 10 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 70, + 84, + 288, + 217 + ], + "blocks": [ + { + "bbox": [ + 70, + 84, + 288, + 217 + ], + "lines": [ + { + "bbox": [ + 70, + 84, + 288, + 217 + ], + "spans": [ + { + "bbox": [ + 70, + 84, + 288, + 217 + ], + "type": "image", + "image_path": "35e63804bae7aac8349c0715b1dcd16bb5dda356dba66e77102f715225f4ac8d.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 67, + 226, + 487, + 242 + ], + "lines": [ + { + "bbox": [ + 67, + 226, + 487, + 242 + ], + "spans": [ + { + "bbox": [ + 67, + 226, + 487, + 242 + ], + "type": "text", + "content": "Figure 5 | Distribution of problem types that failed verification on the MiniF2F-test set." + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "type": "image", + "bbox": [ + 306, + 84, + 524, + 218 + ], + "blocks": [ + { + "bbox": [ + 306, + 84, + 524, + 218 + ], + "lines": [ + { + "bbox": [ + 306, + 84, + 524, + 218 + ], + "spans": [ + { + "bbox": [ + 306, + 84, + 524, + 218 + ], + "type": "image", + "image_path": "c7758c7a762f4643a6567b155f612a84f49906540bd173b97b882308eb809395.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + } + ], + "index": 1 + }, + { + "bbox": [ + 67, + 260, + 331, + 275 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 260, + 331, + 275 + ], + "spans": [ + { + "bbox": [ + 67, + 260, + 331, + 275 + ], + "type": "text", + "content": "5. 
Conclusion, Limitation, and Future Work" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 67, + 286, + 526, + 382 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 286, + 526, + 382 + ], + "spans": [ + { + "bbox": [ + 67, + 286, + 526, + 382 + ], + "type": "text", + "content": "We present a series of ATP models, named Leanabell-Proverseries, by investigating the posttraining scaling of current provers. Leanabell-Prover is started with DeepSeek-Prover-v1.5-SFT and Goedel-Prover-SFT which are two well-trained whole-proof generation models. We first collect a large collection of formalized statements with proofs to continue training. More importantly, we embed cognitive behaviors into the base models by applying a second-stage continual training on such synthetic data. With such prepared SFT models, we finally achieve the final performance through the RL optimization." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 67, + 403, + 526, + 526 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 403, + 526, + 526 + ], + "spans": [ + { + "bbox": [ + 67, + 403, + 526, + 526 + ], + "type": "text", + "content": "Limitations. As we find that the base prover model (i.e., DeepSeek-Prover-v1.5) is a weak LLM compared to those used for posttraining scaling in natural languages, such as Deepseek v3 and the Qwen2.5 family. Although we have piloted to integrate cognitive behaviors into the model and selected the prompt set of RL according to the ability of our SFT models, the expected stronger RL performance has not fully materialized. Our findings right now are more in line with the replication on weak LLMs with RL (Liu et al., 2025; Zeng et al., 2025). Moreover, although we achieved performance gains, we observe that the integrated self-reflection capacities demonstrate a weakening trend after RL training. This suggests that effectively integrating cognitive behaviors into such weak base LLMs remains highly challenging." 
+ } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 67, + 547, + 525, + 575 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 547, + 525, + 575 + ], + "spans": [ + { + "bbox": [ + 67, + 547, + 525, + 575 + ], + "type": "text", + "content": "Future Work. Our goal is to fully invoke formal reasoning abilities, and thus we will continue to explore the following directions (hopefully can achieve some improvement):" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 83, + 590, + 526, + 753 + ], + "type": "list", + "angle": 0, + "index": 9, + "blocks": [ + { + "bbox": [ + 83, + 590, + 525, + 724 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 83, + 590, + 525, + 724 + ], + "spans": [ + { + "bbox": [ + 83, + 590, + 525, + 724 + ], + "type": "text", + "content": "- Bridging formal reasoning with natural languages. Reasoning in formal languages has natural benefits for theorem proving, including no mathematical knowledge hallucination, and all steps and states with verified feedback. However, reasoning abilities of current formal provers (including our current work) still lag behind those of natural language reasoning models. We have made the first step to design a CoT template and synthesize data accordingly, which is intended to insert natural language information to help formal reasoning. We also tried Deepseek R1 with prompting for formal proof generation, which achieves " + }, + { + "bbox": [ + 83, + 590, + 525, + 724 + ], + "type": "inline_equation", + "content": "51.6\\%" + }, + { + "bbox": [ + 83, + 590, + 525, + 724 + ], + "type": "text", + "content": " (pass@32) on MiniF2F-test. Therefore, we hope to develop more effective manners that can transfer the math knowledge and reasoning abilities in natural languages into formal proof generation." 
+ } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 83, + 725, + 526, + 753 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 83, + 725, + 526, + 753 + ], + "spans": [ + { + "bbox": [ + 83, + 725, + 526, + 753 + ], + "type": "text", + "content": "- Bridging whole proof generation with proof-step methods. We believe current RL framework can help bridging these two lines of methods. For example, we can replace the" + } + ] + } + ], + "index": 8 + } + ], + "sub_type": "text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 290, + 777, + 304, + 787 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 290, + 777, + 304, + 787 + ], + "spans": [ + { + "bbox": [ + 290, + 777, + 304, + 787 + ], + "type": "text", + "content": "12" + } + ] + } + ], + "index": 10 + } + ], + "page_size": [ + 595, + 841 + ], + "page_idx": 11 + }, + { + "para_blocks": [ + { + "bbox": [ + 94, + 84, + 526, + 127 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 94, + 84, + 526, + 127 + ], + "spans": [ + { + "bbox": [ + 94, + 84, + 526, + 127 + ], + "type": "text", + "content": "sampling-based response-level rollout in RL into proof-step rollout for better exploration (but still rollout into a whole proof or reach the maximum length, then calculate the response-level reward), thus improving the learning efficiency of the RL training stage." 
+ } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 69, + 147, + 136, + 161 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 147, + 136, + 161 + ], + "spans": [ + { + "bbox": [ + 69, + 147, + 136, + 161 + ], + "type": "text", + "content": "References" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 69, + 173, + 527, + 737 + ], + "type": "list", + "angle": 0, + "index": 16, + "blocks": [ + { + "bbox": [ + 74, + 173, + 526, + 201 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 74, + 173, + 526, + 201 + ], + "spans": [ + { + "bbox": [ + 74, + 173, + 526, + 201 + ], + "type": "text", + "content": "[1] Anthropic. Claude 3.7 Sonnet System card. 2025. URL https://www.anthropic.com/news/claudi-3-7-sonnet." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 74, + 209, + 527, + 238 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 74, + 209, + 527, + 238 + ], + "spans": [ + { + "bbox": [ + 74, + 209, + 527, + 238 + ], + "type": "text", + "content": "[2] AoPS. Art of problem solving. https://artofproblemsolving.com/. Accessed: [date]." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 74, + 246, + 525, + 286 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 74, + 246, + 525, + 286 + ], + "spans": [ + { + "bbox": [ + 74, + 246, + 525, + 286 + ], + "type": "text", + "content": "[3] Z. Azerbayev, H. Schoelkopf, K. Paster, M. D. Santos, S. McAleer, A. Q. Jiang, J. Deng, S. Biderman, and S. Welleck. LLemma: An open language model for mathematics. arXiv preprint arXiv:2310.10631, 2023." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 74, + 295, + 525, + 322 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 74, + 295, + 525, + 322 + ], + "spans": [ + { + "bbox": [ + 74, + 295, + 525, + 322 + ], + "type": "text", + "content": "[4] R. Coulom. Efficient selectivity and backup operators in monte-carlo tree search. 
In International conference on computers and games, pages 72-83. Springer, 2006." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 74, + 331, + 526, + 359 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 74, + 331, + 526, + 359 + ], + "spans": [ + { + "bbox": [ + 74, + 331, + 526, + 359 + ], + "type": "text", + "content": "[5] L. De Moura, S. Kong, J. Avigad, F. Van Doorn, and J. von Raumer. The Lean theorem prover (system description). In International Conference on Automated Deduction (CAD), 2015." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 74, + 367, + 524, + 394 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 74, + 367, + 524, + 394 + ], + "spans": [ + { + "bbox": [ + 74, + 367, + 524, + 394 + ], + "type": "text", + "content": "[6] DeepMind. Alphaproof and Alphageometry, July 2024. URL https://deepmind.google.de/discover/blog/ai-solves-imo-problems-at-silver-medal-level/." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 75, + 402, + 524, + 431 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 75, + 402, + 524, + 431 + ], + "spans": [ + { + "bbox": [ + 75, + 402, + 524, + 431 + ], + "type": "text", + "content": "[7] K. Dong and T. Ma. Stp: Self-play llm theorem provers with iterative conjecturing and proving. arXiv preprint arXiv:2502.00212, 2025." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 74, + 439, + 525, + 480 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 74, + 439, + 525, + 480 + ], + "spans": [ + { + "bbox": [ + 74, + 439, + 525, + 480 + ], + "type": "text", + "content": "[8] K. Gandhi, A. Chakravarthy, A. Singh, N. Lile, and N. D. Goodman. Cognitive behaviors that enable self-improving reasoners, or, four habits of highly effective stars. arXiv preprint arXiv:2503.01307, 2025." 
+ } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 74, + 488, + 525, + 529 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 74, + 488, + 525, + 529 + ], + "spans": [ + { + "bbox": [ + 74, + 488, + 525, + 529 + ], + "type": "text", + "content": "[9] D. Guo, D. Yang, H. Zhang, J. Song, R. Zhang, R. Xu, Q. Zhu, S. Ma, P. Wang, X. Bi, et al. Deepseek-R1: Incentivizing reasoning capability in llms via reinforcement learning. arXiv preprint arXiv:2501.12948, 2025." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 69, + 538, + 525, + 565 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 538, + 525, + 565 + ], + "spans": [ + { + "bbox": [ + 69, + 538, + 525, + 565 + ], + "type": "text", + "content": "[10] B. Hui, J. Yang, Z. Cui, J. Yang, D. Liu, L. Zhang, T. Liu, J. Zhang, B. Yu, K. Dang, et al. Qwen2. 5-coder technical report. arXiv preprint arXiv:2409.12186, 2024." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 69, + 574, + 524, + 615 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 574, + 524, + 615 + ], + "spans": [ + { + "bbox": [ + 69, + 574, + 524, + 615 + ], + "type": "text", + "content": "[11] G. Lample, T. Lacroix, M.-A. Lachaux, A. Rodriguez, A. Hayat, T. Lavril, G. Ebner, and X. Martinet. Hypertree proof search for neural theorem proving. Advances in Neural Information Processing Systems (NeurIPS), 35, 2022." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 69, + 624, + 526, + 651 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 624, + 526, + 651 + ], + "spans": [ + { + "bbox": [ + 69, + 624, + 526, + 651 + ], + "type": "text", + "content": "[12] J. Li, E. Beeching, L. Tunstall, B. Lipkin, R. Soletskyi, S. C. Huang, K. Rasul, L. Yu, A. Jiang, Z. Shen, Z. Qin, B. Dong, L. Zhou, Y. Fleureau, G. Lample, and S. Polu. Numinamath, 2024." 
+ } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 69, + 660, + 524, + 700 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 660, + 524, + 700 + ], + "spans": [ + { + "bbox": [ + 69, + 660, + 524, + 700 + ], + "type": "text", + "content": "[13] Y. Li, D. Du, L. Song, C. Li, W. Wang, T. Yang, and H. Mi. Hunyuanprover: A scalable data synthesis framework and guided tree search for automated theorem proving. arXiv preprint arXiv:2412.20735, 2024." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 69, + 709, + 524, + 737 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 709, + 524, + 737 + ], + "spans": [ + { + "bbox": [ + 69, + 709, + 524, + 737 + ], + "type": "text", + "content": "[14] H. Lin, Z. Sun, Y. Yang, and S. Welleck. Lean-star: Learning to interleave thinking and proving. arXiv preprint arXiv:2407.10040, 2024." + } + ] + } + ], + "index": 15 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 290, + 776, + 303, + 787 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 290, + 776, + 303, + 787 + ], + "spans": [ + { + "bbox": [ + 290, + 776, + 303, + 787 + ], + "type": "text", + "content": "13" + } + ] + } + ], + "index": 17 + } + ], + "page_size": [ + 595, + 841 + ], + "page_idx": 12 + }, + { + "para_blocks": [ + { + "bbox": [ + 68, + 84, + 526, + 756 + ], + "type": "list", + "angle": 0, + "index": 17, + "blocks": [ + { + "bbox": [ + 68, + 84, + 526, + 126 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 84, + 526, + 126 + ], + "spans": [ + { + "bbox": [ + 68, + 84, + 526, + 126 + ], + "type": "text", + "content": "[15] Y. Lin, S. Tang, B. Lyu, J. Wu, H. Lin, K. Yang, J. Li, M. Xia, D. Chen, S. Arora, et al. Goedelprover: A frontier model for open-source automated theorem proving. arXiv preprint arXiv:2502.07640, 2025." 
+ } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 69, + 134, + 525, + 161 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 134, + 525, + 161 + ], + "spans": [ + { + "bbox": [ + 69, + 134, + 525, + 161 + ], + "type": "text", + "content": "[16] Z. Liu, C. Chen, W. Li, P. Qi, T. Pang, C. Du, W. S. Lee, and M. Lin. Understanding r1-zero-like training: A critical perspective. arXiv preprint arXiv:2503.20783, 2025." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 69, + 170, + 524, + 197 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 170, + 524, + 197 + ], + "spans": [ + { + "bbox": [ + 69, + 170, + 524, + 197 + ], + "type": "text", + "content": "[17] mathlib4. The math library of lean 4, 2025. URL https://github.com/leanprover-community/mathlib4." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 69, + 206, + 524, + 233 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 206, + 524, + 233 + ], + "spans": [ + { + "bbox": [ + 69, + 206, + 524, + 233 + ], + "type": "text", + "content": "[18] L. d. Moura and S. Ullrich. The lean 4 theorem prover and programming language. In International Conference on Automated Deduction, 2021." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 69, + 243, + 392, + 257 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 243, + 392, + 257 + ], + "spans": [ + { + "bbox": [ + 69, + 243, + 392, + 257 + ], + "type": "text", + "content": "[19] L. C. Paulson. Isabelle: A generic theorem prover. Springer, 1994." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 69, + 265, + 525, + 292 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 265, + 525, + 292 + ], + "spans": [ + { + "bbox": [ + 69, + 265, + 525, + 292 + ], + "type": "text", + "content": "[20] S. Polu and I. Sutskever. Generative language modeling for automated theorem proving. arXiv preprint arXiv:2009.03393, 2020." 
+ } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 69, + 301, + 525, + 328 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 301, + 525, + 328 + ], + "spans": [ + { + "bbox": [ + 69, + 301, + 525, + 328 + ], + "type": "text", + "content": "[21] S. Polu, J. M. Han, K. Zheng, M. Baksys, I. Babuschkin, and I. Sutskever. Formal mathematics statement curriculum learning. arXiv preprint arXiv:2202.01344, 2022." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 69, + 338, + 525, + 378 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 338, + 525, + 378 + ], + "spans": [ + { + "bbox": [ + 69, + 338, + 525, + 378 + ], + "type": "text", + "content": "[22] R. Rafailov, A. Sharma, E. Mitchell, C. D. Manning, S. Ermon, and C. Finn. Direct preference optimization: Your language model is secretly a reward model. Advances in Neural Information Processing Systems (NeurIPS), 2023." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 69, + 386, + 525, + 428 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 386, + 525, + 428 + ], + "spans": [ + { + "bbox": [ + 69, + 386, + 525, + 428 + ], + "type": "text", + "content": "[23] Z. Shao, P. Wang, Q. Zhu, R. Xu, J. Song, X. Bi, H. Zhang, M. Zhang, Y. Li, Y. Wu, et al. Deepseekmath: Pushing the limits of mathematical reasoning in open language models. arXiv preprint arXiv:2402.03300, 2024." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 69, + 436, + 524, + 464 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 436, + 524, + 464 + ], + "spans": [ + { + "bbox": [ + 69, + 436, + 524, + 464 + ], + "type": "text", + "content": "[24] Q. Team. Qwen2.5: A party of foundation models, September 2024. URL https://qwen.lm.github.io/blog/qwen2.5/." 
+ } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 69, + 473, + 524, + 500 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 473, + 524, + 500 + ], + "spans": [ + { + "bbox": [ + 69, + 473, + 524, + 500 + ], + "type": "text", + "content": "[25] Q. Team. Qwq-32b: Embracing the power of reinforcement learning, March 2025. URL https://qwenlm.github.io/blog/qwq-32b/." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 69, + 508, + 524, + 535 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 508, + 524, + 535 + ], + "spans": [ + { + "bbox": [ + 69, + 508, + 524, + 535 + ], + "type": "text", + "content": "[26] T. H. Trinh, Y. Wu, Q. V. Le, H. He, and T. Luong. Solving olympiad geometry without human demonstrations. Nature, 625(7995):476-482, 2024." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 69, + 544, + 525, + 585 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 544, + 525, + 585 + ], + "spans": [ + { + "bbox": [ + 69, + 544, + 525, + 585 + ], + "type": "text", + "content": "[27] Z. Wan, Y. Li, Y. Song, H. Wang, L. Yang, M. Schmidt, J. Wang, W. Zhang, S. Hu, and Y. Wen. Rema: Learning to meta-think for llms with multi-agent reinforcement learning. arXiv preprint arXiv:2503.09501, 2025." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 69, + 593, + 524, + 622 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 593, + 524, + 622 + ], + "spans": [ + { + "bbox": [ + 69, + 593, + 524, + 622 + ], + "type": "text", + "content": "[28] R. Wang, J. Zhang, Y. Jia, R. Pan, S. Diao, R. Pi, and T. Zhang. Theoremlama: Transforming general-purpose llms into lean4 experts. arXiv preprint arXiv:2407.03203, 2024." 
+ } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 69, + 629, + 525, + 671 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 629, + 525, + 671 + ], + "spans": [ + { + "bbox": [ + 69, + 629, + 525, + 671 + ], + "type": "text", + "content": "[29] R. Wang, R. Pan, Y. Li, J. Zhang, Y. Jia, S. Diao, R. Pi, J. Hu, and T. Zhang. Ma-lot: Multiagent lean-based long chain-of-thought reasoning enhances formal theorem proving. arXiv preprint arXiv:2503.03205, 2025." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 69, + 679, + 525, + 720 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 679, + 525, + 720 + ], + "spans": [ + { + "bbox": [ + 69, + 679, + 525, + 720 + ], + "type": "text", + "content": "[30] Z. Wu, S. Huang, Z. Zhou, H. Ying, J. Wang, D. Lin, and K. Chen. Internl m2.5-Stepprover: Advancing automated theorem proving via expert iteration on large-scale lean problems. arXiv preprint arXiv:2410.15700, 2024." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 69, + 729, + 524, + 756 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 729, + 524, + 756 + ], + "spans": [ + { + "bbox": [ + 69, + 729, + 524, + 756 + ], + "type": "text", + "content": "[31] Z. Wu, J. Wang, D. Lin, and K. Chen. Lean-github: Compiling github lean repositories for a versatile lean prover. arXiv preprint arXiv:2407.17227, 2024." 
+ } + ] + } + ], + "index": 16 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 290, + 777, + 303, + 787 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 290, + 777, + 303, + 787 + ], + "spans": [ + { + "bbox": [ + 290, + 777, + 303, + 787 + ], + "type": "text", + "content": "14" + } + ] + } + ], + "index": 18 + } + ], + "page_size": [ + 595, + 841 + ], + "page_idx": 13 + }, + { + "para_blocks": [ + { + "bbox": [ + 68, + 84, + 527, + 447 + ], + "type": "list", + "angle": 0, + "index": 8, + "blocks": [ + { + "bbox": [ + 68, + 84, + 527, + 126 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 84, + 527, + 126 + ], + "spans": [ + { + "bbox": [ + 68, + 84, + 527, + 126 + ], + "type": "text", + "content": "[32] H. Xin, D. Guo, Z. Shao, Z. Ren, Q. Zhu, B. Liu, C. Ruan, W. Li, and X. Liang. Deepseek-prover: Advancing theorem proving in llms through large-scale synthetic data. arXiv preprint arXiv:2405.14333, 2024." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 69, + 134, + 525, + 176 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 134, + 525, + 176 + ], + "spans": [ + { + "bbox": [ + 69, + 134, + 525, + 176 + ], + "type": "text", + "content": "[33] H. Xin, Z. Ren, J. Song, Z. Shao, W. Zhao, H. Wang, B. Liu, L. Zhang, X. Lu, Q. Du, et al. Deepseek-prover-v1. 5: Harnessing proof assistant feedback for reinforcement learning and monte-carlo tree search. arXiv preprint arXiv:2408.08152, 2024." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 69, + 183, + 527, + 225 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 183, + 527, + 225 + ], + "spans": [ + { + "bbox": [ + 69, + 183, + 527, + 225 + ], + "type": "text", + "content": "[34] R. Xin, C. Xi, J. Yang, F. Chen, H. Wu, X. Xiao, Y. Sun, S. Zheng, and K. Shen. Bfs-prover: Scalable best-first tree search for llm-based automatic theorem proving. 
arXiv preprint arXiv:2502.03438, 2025." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 69, + 232, + 527, + 275 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 232, + 527, + 275 + ], + "spans": [ + { + "bbox": [ + 69, + 232, + 527, + 275 + ], + "type": "text", + "content": "[35] K. Yang, A. Swope, A. Gu, R. Chalamala, P. Song, S. Yu, S. Godil, R. J. Prenger, and A. Anandkumar. Leandojo: Theorem proving with retrieval-augmented language models. Advances in Neural Information Processing Systems (NeurIPS), 2023." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 69, + 282, + 527, + 324 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 282, + 527, + 324 + ], + "spans": [ + { + "bbox": [ + 69, + 282, + 527, + 324 + ], + "type": "text", + "content": "[36] H. Ying, Z. Wu, Y. Geng, J. Wang, D. Lin, and K. Chen. Lean workbook: A large-scale lean problem set formalized from natural language math problems. arXiv preprint arXiv:2406.03847, 2024." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 69, + 332, + 527, + 374 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 332, + 527, + 374 + ], + "spans": [ + { + "bbox": [ + 69, + 332, + 527, + 374 + ], + "type": "text", + "content": "[37] W. Zeng, Y. Huang, Q. Liu, W. Liu, K. He, Z. Ma, and J. He. Simplerl-zoo: Investigating and taming zero reinforcement learning for open base models in the wild. arXiv preprint arXiv:2503.18892, 2025." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 69, + 382, + 527, + 411 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 382, + 527, + 411 + ], + "spans": [ + { + "bbox": [ + 69, + 382, + 527, + 411 + ], + "type": "text", + "content": "[38] X. Zhao, W. Wu, J. Guan, and L. Kong. Promptcot: Synthesizing olympiad-level problems for mathematical reasoning in large language models. arXiv preprint arXiv:2503.02324, 2025." 
+ } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 69, + 417, + 527, + 447 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 417, + 527, + 447 + ], + "spans": [ + { + "bbox": [ + 69, + 417, + 527, + 447 + ], + "type": "text", + "content": "[39] K. Zheng, J. M. Han, and S. Polu. Minif2f: a cross-system benchmark for formal olympiad-level mathematics. arXiv preprint arXiv:2109.00110, 2021." + } + ] + } + ], + "index": 7 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 290, + 776, + 304, + 787 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 290, + 776, + 304, + 787 + ], + "spans": [ + { + "bbox": [ + 290, + 776, + 304, + 787 + ], + "type": "text", + "content": "15" + } + ] + } + ], + "index": 9 + } + ], + "page_size": [ + 595, + 841 + ], + "page_idx": 14 + }, + { + "para_blocks": [ + { + "bbox": [ + 68, + 84, + 131, + 100 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 84, + 131, + 100 + ], + "spans": [ + { + "bbox": [ + 68, + 84, + 131, + 100 + ], + "type": "text", + "content": "Appendix" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 68, + 110, + 201, + 123 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 110, + 201, + 123 + ], + "spans": [ + { + "bbox": [ + 68, + 110, + 201, + 123 + ], + "type": "text", + "content": "A. 
Formalizer Details" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 67, + 136, + 525, + 163 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 136, + 525, + 163 + ], + "spans": [ + { + "bbox": [ + 67, + 136, + 525, + 163 + ], + "type": "text", + "content": "We start with Qwen25-Coder-32B-Instruct (Hui et al., 2024) and use following instruct prompt to train the formalizer:" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 251, + 179, + 342, + 189 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 251, + 179, + 342, + 189 + ], + "spans": [ + { + "bbox": [ + 251, + 179, + 342, + 189 + ], + "type": "text", + "content": "Formalizer Prompt" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 80, + 193, + 498, + 213 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 80, + 193, + 498, + 213 + ], + "spans": [ + { + "bbox": [ + 80, + 193, + 498, + 213 + ], + "type": "text", + "content": "Please translate the mathematical statement {informal_statement} into a theorem statement in Lean 4 code." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 80, + 221, + 490, + 241 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 80, + 221, + 490, + 241 + ], + "spans": [ + { + "bbox": [ + 80, + 221, + 490, + 241 + ], + "type": "text", + "content": "Please do not generate codes of proof or comment sentences (e.g., starting with '/-' or '-')." 
+ } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 80, + 249, + 455, + 261 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 80, + 249, + 455, + 261 + ], + "spans": [ + { + "bbox": [ + 80, + 249, + 455, + 261 + ], + "type": "text", + "content": "The Lean 4 codes are required to complete the 'statement' in the following text:" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 81, + 275, + 130, + 286 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 81, + 275, + 130, + 286 + ], + "spans": [ + { + "bbox": [ + 81, + 275, + 130, + 286 + ], + "type": "text", + "content": "\" ' lean4" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 81, + 288, + 342, + 307 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 81, + 288, + 342, + 307 + ], + "spans": [ + { + "bbox": [ + 81, + 288, + 342, + 307 + ], + "type": "text", + "content": "theorem lean_workbook 'statement' := by sorry" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 66, + 344, + 526, + 439 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 66, + 344, + 526, + 439 + ], + "spans": [ + { + "bbox": [ + 66, + 344, + 526, + 439 + ], + "type": "text", + "content": "As shown in Table 3, the formalizer is targeted to translate the natural language statement to formal statement in Lean 4 codes. We take use of the 29.7K data released by Goedel-Prover (Lin et al., 2025), which provides pairs of informal statement and formal statement in each sample. We train the formalizer with a fixed learning rate " + }, + { + "bbox": [ + 66, + 344, + 526, + 439 + ], + "type": "inline_equation", + "content": "5 \\times 10^{-6}" + }, + { + "bbox": [ + 66, + 344, + 526, + 439 + ], + "type": "text", + "content": " for 2 epochs. We verify the Compiling Correctness (CC) Test, and Faithfulness and Completeness (FC) Test by following the prompts in Goedel-Prover. As shown in Table 4, our formalizer performs similarly to the Formalizer A (Lin et al., 2025)." 
+ } + ] + } + ], + "index": 9 + }, + { + "type": "table", + "bbox": [ + 69, + 448, + 526, + 584 + ], + "blocks": [ + { + "bbox": [ + 69, + 448, + 526, + 584 + ], + "lines": [ + { + "bbox": [ + 69, + 448, + 526, + 584 + ], + "spans": [ + { + "bbox": [ + 69, + 448, + 526, + 584 + ], + "type": "table", + "html": "
Example 1Example 2
Informal StatementSolve for x in the given inequality: x2-2x-24<0Prove that ln(eπ) is equal to π.
Formalizer Outputtheorem lean_workbook (x : R): x^2 - 2*x - 24 < 0 ↔ x ∈ Set.Ioo (-4) 6 := by sorrytheorem lean_workbook : Real.log (Real.exp π) = π := by sorry
", + "image_path": "81ecbadfa92856e0289f8658a20571661770530cab22ef47491919d27bd74bce.jpg" + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "table_body" + } + ], + "index": 10 + }, + { + "type": "table", + "bbox": [ + 150, + 622, + 444, + 691 + ], + "blocks": [ + { + "bbox": [ + 67, + 592, + 409, + 607 + ], + "lines": [ + { + "bbox": [ + 67, + 592, + 409, + 607 + ], + "spans": [ + { + "bbox": [ + 67, + 592, + 409, + 607 + ], + "type": "text", + "content": "Table 3 | Examples of formalizer inputs and outputs for two examples." + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 150, + 622, + 444, + 691 + ], + "lines": [ + { + "bbox": [ + 150, + 622, + 444, + 691 + ], + "spans": [ + { + "bbox": [ + 150, + 622, + 444, + 691 + ], + "type": "table", + "html": "
ModelCC Test (%)FC Test (%)
Formalizer A (Lin et al., 2025)76.748.1
Formalizer B (Lin et al., 2025)88.580.4
Ours Formalizer77.649.0
", + "image_path": "3d92cbecdced18c0f702bb3e88a457e85c4a980d84f653daaa42c640ec2b117d.jpg" + } + ] + } + ], + "index": 12, + "angle": 0, + "type": "table_body" + } + ], + "index": 12 + }, + { + "bbox": [ + 67, + 699, + 525, + 727 + ], + "lines": [ + { + "bbox": [ + 67, + 699, + 525, + 727 + ], + "spans": [ + { + "bbox": [ + 67, + 699, + 525, + 727 + ], + "type": "text", + "content": "Table 4 | Quality assessment of the formalized statement. \"CC\" refers to Compiling Correctness (CC) Test and \"FC\" refers to Faithfulness and Completeness (FC) Test." + } + ] + } + ], + "index": 13, + "angle": 0, + "type": "text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 290, + 777, + 303, + 787 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 290, + 777, + 303, + 787 + ], + "spans": [ + { + "bbox": [ + 290, + 777, + 303, + 787 + ], + "type": "text", + "content": "16" + } + ] + } + ], + "index": 14 + } + ], + "page_size": [ + 595, + 841 + ], + "page_idx": 15 + }, + { + "para_blocks": [ + { + "bbox": [ + 69, + 84, + 255, + 100 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 84, + 255, + 100 + ], + "spans": [ + { + "bbox": [ + 69, + 84, + 255, + 100 + ], + "type": "text", + "content": "B. Cognitive Behaviors Design" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 69, + 111, + 227, + 126 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 111, + 227, + 126 + ], + "spans": [ + { + "bbox": [ + 69, + 111, + 227, + 126 + ], + "type": "text", + "content": "B.1. 
Lean Completion Example" + } + ] + } + ], + "index": 1 + }, + { + "type": "code", + "bbox": [ + 68, + 152, + 525, + 752 + ], + "blocks": [ + { + "bbox": [ + 69, + 134, + 98, + 148 + ], + "lines": [ + { + "bbox": [ + 69, + 134, + 98, + 148 + ], + "spans": [ + { + "bbox": [ + 69, + 134, + 98, + 148 + ], + "type": "text", + "content": "Input:" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "code_caption" + }, + { + "bbox": [ + 68, + 152, + 525, + 752 + ], + "lines": [ + { + "bbox": [ + 68, + 152, + 525, + 752 + ], + "spans": [ + { + "bbox": [ + 68, + 152, + 525, + 752 + ], + "type": "text", + "content": "Complete the following Lean 4 code with explanatory comments preceding each line of code: \n' \\(\\mathit{lean4}\\) \nimport Mathlib \nimport Aesop \nset_option maxHeartbeats O \nopen BigOperators Real Nat Topology Rat \n/\\~ Given \\(\\sin (\\frac{\\pi}{4} -x) = \\frac{3}{5}\\) , find the value of sin2x. Show that it is \\(\\backslash\\) frac{7}{25}. - / theorem sin_2x_value (x : R) (h : sin (Real.pi / 4 - x) = 3 / 5) : sin (2 * x) \\(= 7 / 25\\coloneqq\\) by \n/\\~ Given \\(\\backslash (\\backslash \\sin \\backslash \\text{left} (\\backslash \\text{frac} (\\backslash \\text{pi})\\{4\\} -\\text{x}\\backslash \\text{right}) = \\backslash \\text{frac} (3)\\{5\\} \\backslash)\\) , we need to find the value of \\(\\backslash (\\backslash \\sin 2x\\backslash)\\) . We start by using the trigonometric identity for the sine of a difference and the double-angle formula for sine. Specifically, we use the identity \\(\\backslash (\\backslash \\sin (a - b) = \\backslash \\sin a\\backslash \\cos b - \\backslash \\cos a\\backslash \\sin b\\backslash)\\) and the double-angle formula \\(\\backslash (\\backslash \\sin 2x = 2\\backslash \\sin x\\backslash \\cos x\\backslash)\\) . 1. 
Substitute \\(\\backslash (\\backslash \\sin \\backslash \\text{left} (\\backslash \\text{frac} (\\backslash \\text{pi})\\{4\\} -\\text{x}\\backslash \\text{right}) = \\backslash \\text{frac} (3)\\{5\\} \\backslash)\\) into the identity for the sine of a difference: [ \\\\sin \\(\\backslash\\) left(\\frac{\\frac{\\pi}{4}}{4}\\)-x\\right) \\(=\\) \\\\sin \\(\\backslash\\) left(\\frac{\\frac{\\pi}{4}}{4}\\)-x\\right) \\(=\\) \\\\sin \\(\\backslash\\) left(a-b) \\(=\\) \\\\sin a\\cos b- cos a\\sin b) and the double-angle formula \\(\\backslash (\\backslash \\sin 2x = 2\\backslash \\sin x\\backslash \\cos x\\backslash)\\) \n] \n2. Calculate \\(\\backslash (\\backslash \\sin \\backslash \\text{left} (\\backslash \\text{frac} (\\backslash \\text{pi})\\{4\\} -\\text{x}\\backslash \\text{right}) = \\backslash \\text{frac} (3)\\{5\\} \\backslash)\\) into \\\\left(\\frac{\\frac{\\pi}{4}}{4}\\)-x\\right) and \\\\( (\\cos \\\\left(\\frac{\\frac{\\pi}{4}}{4}\\)-x\\right)\\\\left(\\frac{\\frac{\\pi}{4}}{4}\\)-x\\right)\\\\left(\\frac{\\frac{\\pi}{4}}{4}\\)-x\\right)\\\\left(\\frac{\\frac{\\pi}{4}}{4}\\)-x\\right)\\\\left(\\frac{\\frac{\\pi}{4}}{4}\\)-x\\right)\\\\left(\\frac{\\frac{\\pi}{4}}{4}\\)-x\\right)\\\\left(\\frac{\\pi}{4}\\)-x\\right)\\\\left(\\frac{\\pi}{4}\\)-x\\right)\\\\left(\\frac{\\pi}{4}\\)-x\\right)\\\\left(\\frac{\\pi}{4}\\)-x\\right)\\\\left(\\frac{\\pi}{4}\\)-x\\right)\\\\left(\\frac{\\pi}{4}\\)-x\\right)\\\\left(\\frac{\\pi}{4}\\)-x\\right)\\\\left(\\frac{\\pi}{4} -x\\right)\\\\left(\\frac{\\pi}{4} -x\\right)\\\\left(\\frac{\\pi}{4} -x\\right)\\\\left(\\frac{\\pi}{4} -x\\right)\\\\left(\\frac{\\pi}{4} -x\\right)\\\\left(\\frac{\\pi}{4} -x\\right)\\\\left(\\frac{\\pi}{4} -x\\right)\\\\left(\\frac{\\pi}{4} -x" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "code_body" + } + ], + "index": 3, + "sub_type": "code", + "guess_lang": "txt" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 290, + 776, + 303, + 787 + ], + "type": "page_number", + "angle": 0, 
+ "lines": [ + { + "bbox": [ + 290, + 776, + 303, + 787 + ], + "spans": [ + { + "bbox": [ + 290, + 776, + 303, + 787 + ], + "type": "text", + "content": "17" + } + ] + } + ], + "index": 4 + } + ], + "page_size": [ + 595, + 841 + ], + "page_idx": 16 + }, + { + "para_blocks": [ + { + "type": "code", + "bbox": [ + 69, + 84, + 526, + 267 + ], + "blocks": [ + { + "bbox": [ + 69, + 84, + 526, + 267 + ], + "lines": [ + { + "bbox": [ + 69, + 84, + 526, + 267 + ], + "spans": [ + { + "bbox": [ + 69, + 84, + 526, + 267 + ], + "type": "text", + "content": "\\sin 2x = 2 \\sin \\left(\\text {left} (\\text {x} \\text {right}) \\cos \\left(\\text {left} (\\text {x} \\text {right}) \\right.\\right) \n\\] \n7. Substitute " + }, + { + "bbox": [ + 69, + 84, + 526, + 267 + ], + "type": "inline_equation", + "content": "\\backslash (\\backslash \\cos \\backslash \\text {left} (\\text {x} \\backslash \\text {right}) - \\backslash \\sin \\backslash \\text {left} (\\text {x} \\backslash \\text {right}) = \\backslash \\operatorname{frac}\\{3" + }, + { + "bbox": [ + 69, + 84, + 526, + 267 + ], + "type": "inline_equation", + "content": "\\backslash \\operatorname{sqrt}\\{2\\} \\} \\{5\\} \\backslash)" + }, + { + "bbox": [ + 69, + 84, + 526, + 267 + ], + "type": "text", + "content": " into the equation: \n\\[\n\\begin{array}{l}\n\\backslash [ \\\\\n\\sin 2x = 2 \\backslash \\sin \\backslash \\text {left} (\\text {x} \\backslash \\text {right}) \\cos \\backslash \\text {left} (\\text {x} \\backslash \\text {right}) = 2 \\backslash \\operatorname{left} (\\backslash \\operatorname{frac}\\{3\n\\} \\backslash \\operatorname{sqrt}\\{2\\}) \\{5\\} \\backslash \\text {right}) \\backslash \\operatorname{left} (\\backslash \\operatorname{frac}\\{\\backslash \\operatorname{sqrt}\\{2\\}\\} \\{2\\} \\backslash \\text {right}) = 2 \\backslash \\operatorname{cdot} \\backslash \\operatorname{frac}\\{3\n\\} \\backslash \\operatorname{sqrt}\\{2\\}) \\{5\\} \\backslash \\operatorname{cdot} \\backslash 
\\operatorname{frac}\\{\\backslash \\operatorname{sqrt}\\{2\\}\\} \\{2\\} = \\backslash \\operatorname{frac}\\{3 \\backslash \\operatorname{sqrt}\\{2\\}\\} \\{5\\} \\backslash \\operatorname{cdot}\n\\end{array}\n\\] \n" + }, + { + "bbox": [ + 69, + 84, + 526, + 267 + ], + "type": "inline_equation", + "content": "\\backslash" + }, + { + "bbox": [ + 69, + 84, + 526, + 267 + ], + "type": "text", + "content": " \n8. Simplify the final expression: \n\\[\n\\begin{array}{l}\n\\backslash [ \\\\\n\\sin 2x = \\backslash \\operatorname{frac}\\{7\\} \\{25\\}\n\\end{array}\n\\]" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "code_body" + } + ], + "index": 0, + "sub_type": "code", + "guess_lang": "latex" + }, + { + "type": "code", + "bbox": [ + 69, + 300, + 526, + 423 + ], + "blocks": [ + { + "bbox": [ + 68, + 281, + 131, + 296 + ], + "lines": [ + { + "bbox": [ + 68, + 281, + 131, + 296 + ], + "spans": [ + { + "bbox": [ + 68, + 281, + 131, + 296 + ], + "type": "text", + "content": "Wrong Proof:" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "code_caption" + }, + { + "bbox": [ + 69, + 300, + 526, + 423 + ], + "lines": [ + { + "bbox": [ + 69, + 300, + 526, + 423 + ], + "spans": [ + { + "bbox": [ + 69, + 300, + 526, + 423 + ], + "type": "text", + "content": "-- Use the trigonometric identity for the sine of a difference and the double-angle formula for sine. \nsimp [Real.sin_sub, Real.sin_two.mul, Real.cos_two.mul, Real.sin(pi_div_four, Real.cos(pi_div_four] at h " + }, + { + "bbox": [ + 69, + 300, + 526, + 423 + ], + "type": "inline_equation", + "content": "\\vdash" + }, + { + "bbox": [ + 69, + 300, + 526, + 423 + ], + "type": "text", + "content": " \n-- Simplify the expression using field operations. 
\nfield_simp at h " + }, + { + "bbox": [ + 69, + 300, + 526, + 423 + ], + "type": "inline_equation", + "content": "\\vdash" + }, + { + "bbox": [ + 69, + 300, + 526, + 423 + ], + "type": "text", + "content": " \n-- Normalize the expression by expanding and simplifying it. \nring_nf at h " + }, + { + "bbox": [ + 69, + 300, + 526, + 423 + ], + "type": "inline_equation", + "content": "\\vdash" + }, + { + "bbox": [ + 69, + 300, + 526, + 423 + ], + "type": "text", + "content": " \n-- Use non-linear arithmetic to verify the final result. \nnlinarith [Real.sin_sq_add_cos_sq x, Real.sin_le_one x, Real.cos_le_one x]" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "code_body" + } + ], + "index": 2, + "sub_type": "code", + "guess_lang": "txt" + }, + { + "type": "code", + "bbox": [ + 68, + 455, + 297, + 543 + ], + "blocks": [ + { + "bbox": [ + 68, + 438, + 170, + 452 + ], + "lines": [ + { + "bbox": [ + 68, + 438, + 170, + 452 + ], + "spans": [ + { + "bbox": [ + 68, + 438, + 170, + 452 + ], + "type": "text", + "content": "Tactic Error Messages:" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "code_caption" + }, + { + "bbox": [ + 68, + 455, + 297, + 543 + ], + "lines": [ + { + "bbox": [ + 68, + 455, + 297, + 543 + ], + "spans": [ + { + "bbox": [ + 68, + 455, + 297, + 543 + ], + "type": "text", + "content": "linarith failed to find a contradiction \ncase h1.h \nx: " + }, + { + "bbox": [ + 68, + 455, + 297, + 543 + ], + "type": "inline_equation", + "content": "\\mathbb{R}" + }, + { + "bbox": [ + 68, + 455, + 297, + 543 + ], + "type": "text", + "content": " \nh: " + }, + { + "bbox": [ + 68, + 455, + 297, + 543 + ], + "type": "inline_equation", + "content": "\\sqrt{2} * x.\\cos * 5 - \\sqrt{2} * x.\\sin * 5 = 6" + }, + { + "bbox": [ + 68, + 455, + 297, + 543 + ], + "type": "text", + "content": " \nat: x.cos * x.sin * 50 < 7 \n" + }, + { + "bbox": [ + 68, + 455, + 297, + 543 + ], + "type": "inline_equation", + "content": "\\vdash" + }, + { + "bbox": [ + 68, + 455, + 
297, + 543 + ], + "type": "text", + "content": " False \nfailed" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "code_body" + } + ], + "index": 4, + "sub_type": "code", + "guess_lang": "txt" + }, + { + "type": "code", + "bbox": [ + 69, + 577, + 526, + 710 + ], + "blocks": [ + { + "bbox": [ + 68, + 559, + 166, + 573 + ], + "lines": [ + { + "bbox": [ + 68, + 559, + 166, + 573 + ], + "spans": [ + { + "bbox": [ + 68, + 559, + 166, + 573 + ], + "type": "text", + "content": "Searched Valid Proof:" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "code_caption" + }, + { + "bbox": [ + 69, + 577, + 526, + 710 + ], + "lines": [ + { + "bbox": [ + 69, + 577, + 526, + 710 + ], + "spans": [ + { + "bbox": [ + 69, + 577, + 526, + 710 + ], + "type": "text", + "content": "-- Use the trigonometric identity for the sine of a difference and the double-angle formula for sine. \nsimp [Real.sin_sub, Real.sin_two.mul, Real.cos_two.mul, Real.sin(pi_div_four, Real.cos(pi_div_four] at h " + }, + { + "bbox": [ + 69, + 577, + 526, + 710 + ], + "type": "inline_equation", + "content": "\\vdash" + }, + { + "bbox": [ + 69, + 577, + 526, + 710 + ], + "type": "text", + "content": " \n-- Simplify the expression using field operations. \nfield_simp at h " + }, + { + "bbox": [ + 69, + 577, + 526, + 710 + ], + "type": "inline_equation", + "content": "\\vdash" + }, + { + "bbox": [ + 69, + 577, + 526, + 710 + ], + "type": "text", + "content": " \n-- Normalize the expression by expanding and simplifying it. \nring_nf at h " + }, + { + "bbox": [ + 69, + 577, + 526, + 710 + ], + "type": "inline_equation", + "content": "\\vdash" + }, + { + "bbox": [ + 69, + 577, + 526, + 710 + ], + "type": "text", + "content": " \n-- Use the Pythagorean identity to simplify further. 
\nnlinarith [Real.sin_sq_add_cos_sq x, Real.sqrt_nonneg 2, sq_sqrt (show " + }, + { + "bbox": [ + 69, + 577, + 526, + 710 + ], + "type": "inline_equation", + "content": "0 \\leq" + }, + { + "bbox": [ + 69, + 577, + 526, + 710 + ], + "type": "text", + "content": " 2 by norm_num)]\"" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "code_body" + } + ], + "index": 6, + "sub_type": "code", + "guess_lang": "sql" + }, + { + "type": "code", + "bbox": [ + 69, + 746, + 111, + 757 + ], + "blocks": [ + { + "bbox": [ + 68, + 726, + 128, + 739 + ], + "lines": [ + { + "bbox": [ + 68, + 726, + 128, + 739 + ], + "spans": [ + { + "bbox": [ + 68, + 726, + 128, + 739 + ], + "type": "text", + "content": "Claude CoTs:" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "code_caption" + }, + { + "bbox": [ + 69, + 746, + 111, + 757 + ], + "lines": [ + { + "bbox": [ + 69, + 746, + 111, + 757 + ], + "spans": [ + { + "bbox": [ + 69, + 746, + 111, + 757 + ], + "type": "text", + "content": "" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "code_body" + } + ], + "index": 8, + "sub_type": "code", + "guess_lang": "txt" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 290, + 776, + 303, + 787 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 290, + 776, + 303, + 787 + ], + "spans": [ + { + "bbox": [ + 290, + 776, + 303, + 787 + ], + "type": "text", + "content": "18" + } + ] + } + ], + "index": 9 + } + ], + "page_size": [ + 595, + 841 + ], + "page_idx": 17 + }, + { + "para_blocks": [ + { + "bbox": [ + 68, + 85, + 515, + 122 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 85, + 515, + 122 + ], + "spans": [ + { + "bbox": [ + 68, + 85, + 515, + 122 + ], + "type": "text", + "content": "let's verify this proof to make sure it's working correctly. I need to check each step of the proof to ensure it's valid and identify any potential issues." 
+ } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 68, + 133, + 458, + 159 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 133, + 458, + 159 + ], + "spans": [ + { + "bbox": [ + 68, + 133, + 458, + 159 + ], + "type": "text", + "content": "First, let's examine what we're trying to prove. It appears to be a trigonometric identity involving sine and cosine functions." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 68, + 169, + 196, + 188 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 169, + 196, + 188 + ], + "spans": [ + { + "bbox": [ + 68, + 169, + 196, + 188 + ], + "type": "text", + "content": "The proof begins with:" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 68, + 193, + 512, + 225 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 193, + 512, + 225 + ], + "spans": [ + { + "bbox": [ + 68, + 193, + 512, + 225 + ], + "type": "text", + "content": "simp [Real.sin_sub, Real.sin_two.mul, Real.cos_two.mul, Real.sin(pi_div_four, Real.cos(pi_div_four] at h " + }, + { + "bbox": [ + 68, + 193, + 512, + 225 + ], + "type": "inline_equation", + "content": "\\vdash" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 68, + 229, + 514, + 254 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 229, + 514, + 254 + ], + "spans": [ + { + "bbox": [ + 68, + 229, + 514, + 254 + ], + "type": "text", + "content": "This step seems reasonable - we're using standard trigonometric identities to simplify both the hypothesis " + }, + { + "bbox": [ + 68, + 229, + 514, + 254 + ], + "type": "inline_equation", + "content": "h" + }, + { + "bbox": [ + 68, + 229, + 514, + 254 + ], + "type": "text", + "content": " and the goal." 
+ } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 68, + 265, + 151, + 284 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 265, + 151, + 284 + ], + "spans": [ + { + "bbox": [ + 68, + 265, + 151, + 284 + ], + "type": "text", + "content": "Next, we have:" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 68, + 289, + 167, + 308 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 289, + 167, + 308 + ], + "spans": [ + { + "bbox": [ + 68, + 289, + 167, + 308 + ], + "type": "text", + "content": "field_simp at h" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 68, + 312, + 479, + 338 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 312, + 479, + 338 + ], + "spans": [ + { + "bbox": [ + 68, + 312, + 479, + 338 + ], + "type": "text", + "content": "This tactic simplifies expressions involving field operations, which is appropriate for real-valued trigonometric expressions." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 68, + 349, + 98, + 368 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 349, + 98, + 368 + ], + "spans": [ + { + "bbox": [ + 68, + 349, + 98, + 368 + ], + "type": "text", + "content": "Then:" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 68, + 373, + 151, + 391 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 373, + 151, + 391 + ], + "spans": [ + { + "bbox": [ + 68, + 373, + 151, + 391 + ], + "type": "text", + "content": "ring_nf at h" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 68, + 396, + 497, + 421 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 396, + 497, + 421 + ], + "spans": [ + { + "bbox": [ + 68, + 396, + 497, + 421 + ], + "type": "text", + "content": "This normalizes ring expressions, which should help simplify our algebraic expressions further." 
+ } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 68, + 433, + 167, + 451 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 433, + 167, + 451 + ], + "spans": [ + { + "bbox": [ + 68, + 433, + 167, + 451 + ], + "type": "text", + "content": "Finally, we have:" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 68, + 455, + 495, + 476 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 455, + 495, + 476 + ], + "spans": [ + { + "bbox": [ + 68, + 455, + 495, + 476 + ], + "type": "text", + "content": "nlinarith [Real.sin_sq_add_cos_sq x, Real.sin_le_one x, Real.cos_le_one x]" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 68, + 491, + 517, + 528 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 491, + 517, + 528 + ], + "spans": [ + { + "bbox": [ + 68, + 491, + 517, + 528 + ], + "type": "text", + "content": "This is where I need to be careful. The 'nlinarith' tactic is trying to use non-linear arithmetic with the Pythagorean identity " + }, + { + "bbox": [ + 68, + 491, + 517, + 528 + ], + "type": "inline_equation", + "content": "\\sin^2 (x) + \\cos^2 (x) = 1'" + }, + { + "bbox": [ + 68, + 491, + 517, + 528 + ], + "type": "text", + "content": " and the bounds on sine and cosine functions." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 68, + 539, + 500, + 601 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 539, + 500, + 601 + ], + "spans": [ + { + "bbox": [ + 68, + 539, + 500, + 601 + ], + "type": "text", + "content": "The issue might be that we're using bounds ('Real.sin_le_one x, Real.cos_le_one x') rather than equalities. When dealing with trigonometric identities, we often need exact relationships rather than just bounds. The Pythagorean identity is an equality, but we're also including inequalities in our reasoning." 
+ } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 68, + 611, + 515, + 661 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 611, + 515, + 661 + ], + "spans": [ + { + "bbox": [ + 68, + 611, + 515, + 661 + ], + "type": "text", + "content": "A better approach would be to focus on using the Pythagorean identity directly, possibly with additional facts about square roots. The `sqrt(2)` in our expressions suggests we might need properties about square roots, such as `sq_sqrt` to handle expressions like `(\\sqrt{2})^{\\sim}`." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 68, + 671, + 523, + 708 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 671, + 523, + 708 + ], + "spans": [ + { + "bbox": [ + 68, + 671, + 523, + 708 + ], + "type": "text", + "content": "Additionally, we should ensure that we're providing all the necessary facts to the arithmetic solver. For instance, we might need to explicitly include that '\\sqrt{2} \\geq 0'\\), which could be done with 'Real.sqrt_nonneg \\(2'\\)." + } + ] + } + ], + "index": 16 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 290, + 776, + 303, + 787 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 290, + 776, + 303, + 787 + ], + "spans": [ + { + "bbox": [ + 290, + 776, + 303, + 787 + ], + "type": "text", + "content": "19" + } + ] + } + ], + "index": 17 + } + ], + "page_size": [ + 595, + 841 + ], + "page_idx": 18 + }, + { + "para_blocks": [ + { + "bbox": [ + 69, + 85, + 190, + 100 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 85, + 190, + 100 + ], + "spans": [ + { + "bbox": [ + 69, + 85, + 190, + 100 + ], + "type": "text", + "content": "B.2. 
Rewriting Example" + } + ] + } + ], + "index": 0 + }, + { + "type": "code", + "bbox": [ + 68, + 125, + 525, + 310 + ], + "blocks": [ + { + "bbox": [ + 69, + 108, + 98, + 121 + ], + "lines": [ + { + "bbox": [ + 69, + 108, + 98, + 121 + ], + "spans": [ + { + "bbox": [ + 69, + 108, + 98, + 121 + ], + "type": "text", + "content": "Input:" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "code_caption" + }, + { + "bbox": [ + 68, + 125, + 525, + 310 + ], + "lines": [ + { + "bbox": [ + 68, + 125, + 525, + 310 + ], + "spans": [ + { + "bbox": [ + 68, + 125, + 525, + 310 + ], + "type": "text", + "content": "Complete the following Lean 4 code with explanatory comments preceding each line of code: \n```\n```\n```lean4\nimport Mathlib\nimport Aesop\nset_option maxHeartbeats 0\nopen BigOperators Real Nat Topology Rat\n-- Prove that if " + }, + { + "bbox": [ + 68, + 125, + 525, + 310 + ], + "type": "inline_equation", + "content": "n \\geq 2" + }, + { + "bbox": [ + 68, + 125, + 525, + 310 + ], + "type": "text", + "content": ", then " + }, + { + "bbox": [ + 68, + 125, + 525, + 310 + ], + "type": "inline_equation", + "content": "n" + }, + { + "bbox": [ + 68, + 125, + 525, + 310 + ], + "type": "text", + "content": " distinct integers can be given such that the sum of any two of them is divisible by their difference.-/\ntheorem lean_workbook : ∀ n : N, 2 ≤ n → ∃ l : List Z, l.length = n ∧ ∀ x y, x ∈ l ∧ y ∈ l ∧ x ≠ y → (x + y) % (x - y) = 0 := by" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "code_body" + } + ], + "index": 2, + "sub_type": "code", + "guess_lang": "txt" + }, + { + "type": "code", + "bbox": [ + 68, + 339, + 525, + 596 + ], + "blocks": [ + { + "bbox": [ + 69, + 322, + 131, + 336 + ], + "lines": [ + { + "bbox": [ + 69, + 322, + 131, + 336 + ], + "spans": [ + { + "bbox": [ + 69, + 322, + 131, + 336 + ], + "type": "text", + "content": "Wrong Proof:" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "code_caption" + }, + { + "bbox": [ + 68, + 339, + 525, 
+ 596 + ], + "lines": [ + { + "bbox": [ + 68, + 339, + 525, + 596 + ], + "spans": [ + { + "bbox": [ + 68, + 339, + 525, + 596 + ], + "type": "text", + "content": "- We need to prove that for any natural number " + }, + { + "bbox": [ + 68, + 339, + 525, + 596 + ], + "type": "inline_equation", + "content": "(n \\backslash \\text{geq} 2)" + }, + { + "bbox": [ + 68, + 339, + 525, + 596 + ], + "type": "text", + "content": ", there exists a list of " + }, + { + "bbox": [ + 68, + 339, + 525, + 596 + ], + "type": "inline_equation", + "content": "(n \\backslash)" + }, + { + "bbox": [ + 68, + 339, + 525, + 596 + ], + "type": "text", + "content": " distinct integers such that the sum of any two integers in the list is divisible by their difference. \n1. **Base Case**: When " + }, + { + "bbox": [ + 68, + 339, + 525, + 596 + ], + "type": "inline_equation", + "content": "(n = 2)" + }, + { + "bbox": [ + 68, + 339, + 525, + 596 + ], + "type": "text", + "content": ", we can choose the list " + }, + { + "bbox": [ + 68, + 339, + 525, + 596 + ], + "type": "inline_equation", + "content": "([0, 1])" + }, + { + "bbox": [ + 68, + 339, + 525, + 596 + ], + "type": "text", + "content": ". 
For any " + }, + { + "bbox": [ + 68, + 339, + 525, + 596 + ], + "type": "inline_equation", + "content": "(x \\backslash)" + }, + { + "bbox": [ + 68, + 339, + 525, + 596 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 68, + 339, + 525, + 596 + ], + "type": "inline_equation", + "content": "(y \\backslash)" + }, + { + "bbox": [ + 68, + 339, + 525, + 596 + ], + "type": "text", + "content": " in this list, if " + }, + { + "bbox": [ + 68, + 339, + 525, + 596 + ], + "type": "inline_equation", + "content": "(x \\backslash \\text{neq} y)" + }, + { + "bbox": [ + 68, + 339, + 525, + 596 + ], + "type": "text", + "content": ", then " + }, + { + "bbox": [ + 68, + 339, + 525, + 596 + ], + "type": "inline_equation", + "content": "(x + y) \\mod (x - y) = 0" + }, + { + "bbox": [ + 68, + 339, + 525, + 596 + ], + "type": "text", + "content": ". \n2. **Inductive Step**: Assume the statement holds for some " + }, + { + "bbox": [ + 68, + 339, + 525, + 596 + ], + "type": "inline_equation", + "content": "(n \\backslash \\text{geq} 2)" + }, + { + "bbox": [ + 68, + 339, + 525, + 596 + ], + "type": "text", + "content": ". We need to show it holds for " + }, + { + "bbox": [ + 68, + 339, + 525, + 596 + ], + "type": "inline_equation", + "content": "(n + 1)" + }, + { + "bbox": [ + 68, + 339, + 525, + 596 + ], + "type": "text", + "content": ". 
By the inductive hypothesis, there exists a list of " + }, + { + "bbox": [ + 68, + 339, + 525, + 596 + ], + "type": "inline_equation", + "content": "(n \\backslash)" + }, + { + "bbox": [ + 68, + 339, + 525, + 596 + ], + "type": "text", + "content": " distinct integers " + }, + { + "bbox": [ + 68, + 339, + 525, + 596 + ], + "type": "inline_equation", + "content": "(1 \\backslash)" + }, + { + "bbox": [ + 68, + 339, + 525, + 596 + ], + "type": "text", + "content": " such that the sum of any two elements in " + }, + { + "bbox": [ + 68, + 339, + 525, + 596 + ], + "type": "inline_equation", + "content": "(1 \\backslash)" + }, + { + "bbox": [ + 68, + 339, + 525, + 596 + ], + "type": "text", + "content": " is divisible by their difference. We can add one more integer to this list and show that the new list still satisfies the condition. \n- / \nintro n hn \n-- Use a base case for " + }, + { + "bbox": [ + 68, + 339, + 525, + 596 + ], + "type": "inline_equation", + "content": "n = 2" + }, + { + "bbox": [ + 68, + 339, + 525, + 596 + ], + "type": "text", + "content": " \nuse [0, 1] \n-- Verify that the base case satisfies the conditions \nsimp \n-- Use Aesop to handle the general case \n<;> aesop" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "code_body" + } + ], + "index": 4, + "sub_type": "code", + "guess_lang": "latex" + }, + { + "type": "code", + "bbox": [ + 68, + 625, + 525, + 761 + ], + "blocks": [ + { + "bbox": [ + 69, + 608, + 207, + 622 + ], + "lines": [ + { + "bbox": [ + 69, + 608, + 207, + 622 + ], + "spans": [ + { + "bbox": [ + 69, + 608, + 207, + 622 + ], + "type": "text", + "content": "Revised Comments and Proofs:" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "code_caption" + }, + { + "bbox": [ + 68, + 625, + 525, + 761 + ], + "lines": [ + { + "bbox": [ + 68, + 625, + 525, + 761 + ], + "spans": [ + { + "bbox": [ + 68, + 625, + 525, + 761 + ], + "type": "text", + "content": "/\\-We need to prove that for any natural number \\(\\backslash\\) 
(n\\geq 2 \\), there exists a list of \\(\\backslash\\) (n \\) distinct integers such that the sum of any two integers in the list is divisible by their difference. \n1. \\*\\*Base Case**: When \\(\\backslash\\) ( \\(n = 2\\backslash\\) ), we can choose the list \\(\\backslash ([0,2]\\backslash)\\). For any \\(\\backslash\\) (x \\) and \\(\\backslash\\) y \\) in this list, if \\(\\backslash\\) (x\\neq y \\), then \\(\\backslash\\) (x + y) mod (x - y) = 0 \\). For example, \\(\\backslash\\) (0 + 2) mod (0 - 2) = 2 \\mod -2 = 0 \\). \n2. \\*\\*Inductive Step**: Assume the statement holds for some \\(\\backslash\\) (n\\geq 2 \\). We need to show it holds for \\(\\backslash\\) (n + 1 \\). By the inductive hypothesis, there" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "code_body" + } + ], + "index": 6, + "sub_type": "code", + "guess_lang": "txt" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 290, + 777, + 303, + 787 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 290, + 777, + 303, + 787 + ], + "spans": [ + { + "bbox": [ + 290, + 777, + 303, + 787 + ], + "type": "text", + "content": "20" + } + ] + } + ], + "index": 7 + } + ], + "page_size": [ + 595, + 841 + ], + "page_idx": 19 + }, + { + "para_blocks": [ + { + "type": "code", + "bbox": [ + 69, + 84, + 526, + 342 + ], + "blocks": [ + { + "bbox": [ + 69, + 84, + 526, + 342 + ], + "lines": [ + { + "bbox": [ + 69, + 84, + 526, + 342 + ], + "spans": [ + { + "bbox": [ + 69, + 84, + 526, + 342 + ], + "type": "text", + "content": "exists a list of \\(\\backslash\\) (n \\(\\backslash\\)) distinct integers \\(\\backslash\\) (\\{a_1, a_2, \\ldots, a_n\\} \\}) such that the sum of any two elements in this list is divisible by their difference. We can add one more integer to this list to form a new list of \\(\\backslash\\) (n + 1 \\(\\backslash\\)) integers. A good choice for the new integer is \\(\\backslash\\) (a_n+1 \\() =\\) a_n + (n+1)! \\)). 
This ensures that for any \\(\\backslash\\) (i \\(\\backslash\\)) and \\(\\backslash\\) (j \\(\\backslash\\)) in the range \\(\\backslash\\) (1 \\(\\backslash\\) leq i, j \\(\\backslash\\) leq n+1 \\(\\backslash\\)) with \\(\\backslash\\) (i \\(\\backslash\\) neq j \\(\\backslash\\)), the sum \\(\\backslash\\) (a_i + a_j \\(\\backslash\\)) is divisible by their difference \\(\\backslash\\) (a_i - a_j \\(\\backslash\\)). This is because \\(\\backslash\\) (a_i + a_j) - (a_i - a_j) = 2a_j \\), and since \\(\\backslash\\) (a_j \\(\\backslash\\)) is an integer, \\(\\backslash\\) (2a_j \\(\\backslash\\)) is always divisible by \\(\\backslash\\) (a_i - a_j \\)). Thus, the list \\(\\backslash\\) (\\{a_1, a_2, \\ldots, a_ldots, a_n, a_{\\{n+1\\}}\\} \\}) satisfies the condition, completing the inductive step. \n- \n- Introduce the natural number \\(n\\) and the assumption that \\(n \\geq 2\\) \nintro n hn \n- Use a construction to construct the list of integers \nuse List.replicate n 0 \n- Simplify the conditions given the assumption \\(n \\geq 2\\) \nsimp [hn, List.length, List.mem_replicate, List.mem_range, List.mem_filter] \n- Use Aesop to complete the proof \n<;> aesop\"" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "code_body" + } + ], + "index": 0, + "sub_type": "code", + "guess_lang": "txt" + }, + { + "bbox": [ + 67, + 354, + 129, + 365 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 354, + 129, + 365 + ], + "spans": [ + { + "bbox": [ + 67, + 354, + 129, + 365 + ], + "type": "text", + "content": "Claude CoTs:" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 69, + 373, + 462, + 397 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 373, + 462, + 397 + ], + "spans": [ + { + "bbox": [ + 69, + 373, + 462, + 397 + ], + "type": "text", + "content": "Let me verify my proof for this problem about distinct integers with divisibility properties..." 
+ } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 69, + 409, + 515, + 444 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 409, + 515, + 444 + ], + "spans": [ + { + "bbox": [ + 69, + 409, + 515, + 444 + ], + "type": "text", + "content": "Looking at my current solution, I'm trying to prove that for any " + }, + { + "bbox": [ + 69, + 409, + 515, + 444 + ], + "type": "inline_equation", + "content": "n \\geq 2" + }, + { + "bbox": [ + 69, + 409, + 515, + 444 + ], + "type": "text", + "content": ", there exist " + }, + { + "bbox": [ + 69, + 409, + 515, + 444 + ], + "type": "inline_equation", + "content": "n" + }, + { + "bbox": [ + 69, + 409, + 515, + 444 + ], + "type": "text", + "content": " distinct integers where the sum of any two is divisible by their difference." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 69, + 456, + 509, + 480 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 456, + 509, + 480 + ], + "spans": [ + { + "bbox": [ + 69, + 456, + 509, + 480 + ], + "type": "text", + "content": "Wait, I see an issue here. 
I've used '[0, 1]' as my example list, but let me check if this actually satisfies the condition:" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 69, + 481, + 508, + 493 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 481, + 508, + 493 + ], + "spans": [ + { + "bbox": [ + 69, + 481, + 508, + 493 + ], + "type": "text", + "content": "- For " + }, + { + "bbox": [ + 69, + 481, + 508, + 493 + ], + "type": "inline_equation", + "content": "x = 0" + }, + { + "bbox": [ + 69, + 481, + 508, + 493 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 69, + 481, + 508, + 493 + ], + "type": "inline_equation", + "content": "y = 1" + }, + { + "bbox": [ + 69, + 481, + 508, + 493 + ], + "type": "text", + "content": ": " + }, + { + "bbox": [ + 69, + 481, + 508, + 493 + ], + "type": "inline_equation", + "content": "(x + y) \\% (x - y) = (0 + 1) \\% (0 - 1) = 1 \\% (-1) = 0" + }, + { + "bbox": [ + 69, + 481, + 508, + 493 + ], + "type": "text", + "content": "?" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 69, + 504, + 500, + 539 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 504, + 500, + 539 + ], + "spans": [ + { + "bbox": [ + 69, + 504, + 500, + 539 + ], + "type": "text", + "content": "Let me verify this calculation. We have " + }, + { + "bbox": [ + 69, + 504, + 500, + 539 + ], + "type": "inline_equation", + "content": "1\\%" + }, + { + "bbox": [ + 69, + 504, + 500, + 539 + ], + "type": "text", + "content": " (-1) which should be the remainder when 1 is divided by -1. Since -1 divides 1 as " + }, + { + "bbox": [ + 69, + 504, + 500, + 539 + ], + "type": "inline_equation", + "content": "-1 \\times (-1) = 1" + }, + { + "bbox": [ + 69, + 504, + 500, + 539 + ], + "type": "text", + "content": ", the remainder is 0. So this seems correct." 
+ } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 69, + 552, + 508, + 582 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 552, + 508, + 582 + ], + "spans": [ + { + "bbox": [ + 69, + 552, + 508, + 582 + ], + "type": "text", + "content": "But wait, there's another issue. I've only constructed a list for " + }, + { + "bbox": [ + 69, + 552, + 508, + 582 + ], + "type": "inline_equation", + "content": "n = 2" + }, + { + "bbox": [ + 69, + 552, + 508, + 582 + ], + "type": "text", + "content": ", but my proof needs to work for any " + }, + { + "bbox": [ + 69, + 552, + 508, + 582 + ], + "type": "inline_equation", + "content": "n \\geq 2" + }, + { + "bbox": [ + 69, + 552, + 508, + 582 + ], + "type": "text", + "content": ". The line:" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 69, + 588, + 127, + 607 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 588, + 127, + 607 + ], + "spans": [ + { + "bbox": [ + 69, + 588, + 127, + 607 + ], + "type": "text", + "content": "use [0, 1]" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 69, + 612, + 502, + 635 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 612, + 502, + 635 + ], + "spans": [ + { + "bbox": [ + 69, + 612, + 502, + 635 + ], + "type": "text", + "content": "only provides a list of length 2, but I need to handle cases where " + }, + { + "bbox": [ + 69, + 612, + 502, + 635 + ], + "type": "inline_equation", + "content": "n > 2" + }, + { + "bbox": [ + 69, + 612, + 502, + 635 + ], + "type": "text", + "content": " as well." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 69, + 648, + 498, + 696 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 648, + 498, + 696 + ], + "spans": [ + { + "bbox": [ + 69, + 648, + 498, + 696 + ], + "type": "text", + "content": "I need to generalize my construction. 
Perhaps I should create a list of length " + }, + { + "bbox": [ + 69, + 648, + 498, + 696 + ], + "type": "inline_equation", + "content": "n" + }, + { + "bbox": [ + 69, + 648, + 498, + 696 + ], + "type": "text", + "content": " instead of just assuming the base case will handle everything. The 'simp' and 'aesop' tactics might not be able to automatically construct this list for arbitrary " + }, + { + "bbox": [ + 69, + 648, + 498, + 696 + ], + "type": "inline_equation", + "content": "n" + }, + { + "bbox": [ + 69, + 648, + 498, + 696 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 69, + 708, + 516, + 744 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 708, + 516, + 744 + ], + "spans": [ + { + "bbox": [ + 69, + 708, + 516, + 744 + ], + "type": "text", + "content": "Let me reconsider this approach... What if I use a more general construction? I could use 'List.replicate n 0' to create a list of " + }, + { + "bbox": [ + 69, + 708, + 516, + 744 + ], + "type": "inline_equation", + "content": "\\mathbf{n}" + }, + { + "bbox": [ + 69, + 708, + 516, + 744 + ], + "type": "text", + "content": " zeros. But then all elements would be the same, and the problem requires distinct integers." 
+ } + ] + } + ], + "index": 11 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 290, + 776, + 302, + 787 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 290, + 776, + 302, + 787 + ], + "spans": [ + { + "bbox": [ + 290, + 776, + 302, + 787 + ], + "type": "text", + "content": "21" + } + ] + } + ], + "index": 12 + } + ], + "page_size": [ + 595, + 841 + ], + "page_idx": 20 + }, + { + "para_blocks": [ + { + "bbox": [ + 68, + 85, + 496, + 123 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 85, + 496, + 123 + ], + "spans": [ + { + "bbox": [ + 68, + 85, + 496, + 123 + ], + "type": "text", + "content": "Actually, I should think more carefully about what list would satisfy the divisibility condition. If I have integers " + }, + { + "bbox": [ + 68, + 85, + 496, + 123 + ], + "type": "inline_equation", + "content": "a" + }, + { + "bbox": [ + 68, + 85, + 496, + 123 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 68, + 85, + 496, + 123 + ], + "type": "inline_equation", + "content": "b" + }, + { + "bbox": [ + 68, + 85, + 496, + 123 + ], + "type": "text", + "content": " where " + }, + { + "bbox": [ + 68, + 85, + 496, + 123 + ], + "type": "inline_equation", + "content": "a \\neq b" + }, + { + "bbox": [ + 68, + 85, + 496, + 123 + ], + "type": "text", + "content": ", then I need " + }, + { + "bbox": [ + 68, + 85, + 496, + 123 + ], + "type": "inline_equation", + "content": "(a + b) \\% (a - b) = 0" + }, + { + "bbox": [ + 68, + 85, + 496, + 123 + ], + "type": "text", + "content": "." 
+ } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 68, + 133, + 505, + 170 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 133, + 505, + 170 + ], + "spans": [ + { + "bbox": [ + 68, + 133, + 505, + 170 + ], + "type": "text", + "content": "For the general case, I might need to use a more sophisticated construction like arithmetic sequences or something based on factorials to ensure the divisibility property holds." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 68, + 181, + 516, + 231 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 181, + 516, + 231 + ], + "spans": [ + { + "bbox": [ + 68, + 181, + 516, + 231 + ], + "type": "text", + "content": "The issue with my current proof is that I'm not providing a general construction for " + }, + { + "bbox": [ + 68, + 181, + 516, + 231 + ], + "type": "inline_equation", + "content": "n" + }, + { + "bbox": [ + 68, + 181, + 516, + 231 + ], + "type": "text", + "content": " elements - I'm only handling " + }, + { + "bbox": [ + 68, + 181, + 516, + 231 + ], + "type": "inline_equation", + "content": "n = 2" + }, + { + "bbox": [ + 68, + 181, + 516, + 231 + ], + "type": "text", + "content": ". And even if 'aesop' could somehow extend this, I haven't given it enough guidance on how to construct the larger list." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 68, + 241, + 196, + 253 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 241, + 196, + 253 + ], + "spans": [ + { + "bbox": [ + 68, + 241, + 196, + 253 + ], + "type": "text", + "content": "To fix this, I should:" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 67, + 253, + 496, + 278 + ], + "type": "list", + "angle": 0, + "index": 6, + "blocks": [ + { + "bbox": [ + 68, + 253, + 405, + 265 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 253, + 405, + 265 + ], + "spans": [ + { + "bbox": [ + 68, + 253, + 405, + 265 + ], + "type": "text", + "content": "1. 
Either provide an explicit construction for arbitrary " + }, + { + "bbox": [ + 68, + 253, + 405, + 265 + ], + "type": "inline_equation", + "content": "n" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 67, + 265, + 496, + 278 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 265, + 496, + 278 + ], + "spans": [ + { + "bbox": [ + 67, + 265, + 496, + 278 + ], + "type": "text", + "content": "2. Or use induction properly, showing how to extend from " + }, + { + "bbox": [ + 67, + 265, + 496, + 278 + ], + "type": "inline_equation", + "content": "n" + }, + { + "bbox": [ + 67, + 265, + 496, + 278 + ], + "type": "text", + "content": " to " + }, + { + "bbox": [ + 67, + 265, + 496, + 278 + ], + "type": "inline_equation", + "content": "n + 1" + }, + { + "bbox": [ + 67, + 265, + 496, + 278 + ], + "type": "text", + "content": " elements" + } + ] + } + ], + "index": 5 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 68, + 288, + 511, + 361 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 288, + 511, + 361 + ], + "spans": [ + { + "bbox": [ + 68, + 288, + 511, + 361 + ], + "type": "text", + "content": "I think I need to reconsider what list would work for this problem. Maybe something like powers of 2, or a list where consecutive elements have a specific relationship? 
The factorial approach mentioned in my comment might be promising - adding elements like " + }, + { + "bbox": [ + 68, + 288, + 511, + 361 + ], + "type": "inline_equation", + "content": "\\mathbf{a}_1" + }, + { + "bbox": [ + 68, + 288, + 511, + 361 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 68, + 288, + 511, + 361 + ], + "type": "inline_equation", + "content": "\\mathbf{a}_2" + }, + { + "bbox": [ + 68, + 288, + 511, + 361 + ], + "type": "text", + "content": ", ..., " + }, + { + "bbox": [ + 68, + 288, + 511, + 361 + ], + "type": "inline_equation", + "content": "\\mathbf{a}_n" + }, + { + "bbox": [ + 68, + 288, + 511, + 361 + ], + "type": "text", + "content": " where each new element is constructed to maintain the divisibility relationship with all previous elements." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 68, + 372, + 498, + 422 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 372, + 498, + 422 + ], + "spans": [ + { + "bbox": [ + 68, + 372, + 498, + 422 + ], + "type": "text", + "content": "I should try constructing a list like " + }, + { + "bbox": [ + 68, + 372, + 498, + 422 + ], + "type": "inline_equation", + "content": "[0, 1!, 2!, \\ldots, (n-1)!!]" + }, + { + "bbox": [ + 68, + 372, + 498, + 422 + ], + "type": "text", + "content": " or something similar and verify that it satisfies the divisibility condition. This would give me distinct integers with potentially useful divisibility properties." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 67, + 451, + 154, + 465 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 451, + 154, + 465 + ], + "spans": [ + { + "bbox": [ + 67, + 451, + 154, + 465 + ], + "type": "text", + "content": "C. 
RL Details" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 67, + 478, + 525, + 505 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 478, + 525, + 505 + ], + "spans": [ + { + "bbox": [ + 67, + 478, + 525, + 505 + ], + "type": "text", + "content": "Figure 6 shows the overall distribution of valid proofs in pass@32 testing. We collect the distribution base on our Leanabell-Prover-GD-SFT model." + } + ] + } + ], + "index": 10 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 290, + 776, + 303, + 787 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 290, + 776, + 303, + 787 + ], + "spans": [ + { + "bbox": [ + 290, + 776, + 303, + 787 + ], + "type": "text", + "content": "22" + } + ] + } + ], + "index": 11 + } + ], + "page_size": [ + 595, + 841 + ], + "page_idx": 21 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 73, + 275, + 520, + 511 + ], + "blocks": [ + { + "bbox": [ + 73, + 275, + 520, + 511 + ], + "lines": [ + { + "bbox": [ + 73, + 275, + 520, + 511 + ], + "spans": [ + { + "bbox": [ + 73, + 275, + 520, + 511 + ], + "type": "image", + "image_path": "777becb88cf144ee0b188a5eb4a011a795525f9e8a7316c04c0b6494231fdd17.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 69, + 524, + 524, + 565 + ], + "lines": [ + { + "bbox": [ + 69, + 524, + 524, + 565 + ], + "spans": [ + { + "bbox": [ + 69, + 524, + 524, + 565 + ], + "type": "text", + "content": "Figure 6 | Distribution of valid proofs in pass@32. We calculate this distribution based on the Leanabell-Prover-GD-SFT model. To build this set, we remain the formal statements who has at least sampled proofs are valid by Lean 4 compiler." 
+ } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 290, + 777, + 302, + 786 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 290, + 777, + 302, + 786 + ], + "spans": [ + { + "bbox": [ + 290, + 777, + 302, + 786 + ], + "type": "text", + "content": "23" + } + ] + } + ], + "index": 2 + } + ], + "page_size": [ + 595, + 841 + ], + "page_idx": 22 + } + ], + "_backend": "vlm", + "_version_name": "2.6.4" +} \ No newline at end of file diff --git a/data/2025/2504_06xxx/2504.06196/00c24729-2bee-4726-a0b4-13a163fe9cf3_content_list.json b/data/2025/2504_06xxx/2504.06196/00c24729-2bee-4726-a0b4-13a163fe9cf3_content_list.json new file mode 100644 index 0000000000000000000000000000000000000000..f34a19d8a27feebd82b869a7511142e337f09091 --- /dev/null +++ b/data/2025/2504_06xxx/2504.06196/00c24729-2bee-4726-a0b4-13a163fe9cf3_content_list.json @@ -0,0 +1,5887 @@ +[ + { + "type": "text", + "text": "TxGemma:", + "text_level": 1, + "bbox": [ + 393, + 84, + 570, + 111 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Efficient and Agentic LLMs for Therapeutics", + "text_level": 1, + "bbox": [ + 125, + 127, + 833, + 157 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Eric Wang*,†,1, Samuel Schmidgall*,1, Paul F. Jaeger1, Fan Zhang2, Rory Pilgrim2, Yossi Matias2, Joelle Barral1, David Fleet1 and Shekoofeh Azizi†,1", + "bbox": [ + 153, + 175, + 805, + 215 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "$^{1}$ Google DeepMind, $^{2}$ Google Research", + "bbox": [ + 316, + 222, + 640, + 242 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Therapeutic development is a costly and high-risk endeavor that is often plagued by high failure rates. To address this, we introduce TxGemma, a suite of efficient, generalist large language models (LLMs) capable of therapeutic property prediction as well as interactive reasoning and explainability. 
Unlike task-specific models, TxGemma synthesizes information from diverse sources, enabling broad application across the therapeutic development pipeline. The suite includes 2B, 9B, and 27B parameter models, fine-tuned from Gemma-2 on a comprehensive dataset of small molecules, proteins, nucleic acids, diseases, and cell lines. Across 66 therapeutic development tasks, TxGemma achieved superior or comparable performance to the state-of-the-art generalist model on 64 (superior on 45), and against state-of-the-art specialist models on 50 (superior on 26). Fine-tuning TxGemma models on therapeutic downstream tasks, such as clinical trial adverse event prediction, requires less training data than fine-tuning base LLMs, making TxGemma suitable for data-limited applications. Beyond these predictive capabilities, TxGemma features conversational models that bridge the gap between general LLMs and specialized property predictors. These allow scientists to interact in natural language, provide mechanistic reasoning for predictions based on molecular structure, and engage in scientific discussions. Building on this, we further introduce Agentic-Tx, a generalist therapeutic agentic system powered by Gemini 2.5 that reasons, acts, manages diverse workflows, and acquires external domain knowledge. Agentic-Tx surpasses prior leading models on the Humanity's Last Exam benchmark (Chemistry & Biology) with $52.3\\%$ relative improvement over o3-mini (high) and $26.7\\%$ over o3-mini (high) on GPQA (Chemistry). On ChemBench, TxGemma excels with improvements of $6.3\\%$ (ChemBench-Preference) and $2.4\\%$ (ChemBench-Mini) over o3-mini (high), as well as $17.7\\%$ and $5.6\\%$ over o1, respectively. 
TxGemma's collection is released as open models, enabling researchers to adapt and validate it on their own diverse datasets, thus facilitating more challenging real-world applications.", + "bbox": [ + 143, + 284, + 844, + 602 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "1 Introduction", + "text_level": 1, + "bbox": [ + 111, + 626, + 261, + 643 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "The pharmaceutical industry faces significant challenges in bringing new therapeutics to market. High attrition rates and lengthy, costly development timelines [3, 4] necessitate innovative approaches to therapeutic development. Success requires a drug candidate to not only demonstrate efficacy but also possess favorable safety, metabolic stability, pharmacokinetic/pharmacodynamic properties and developability, among other characteristics. Determining these diverse characteristics often relies on a large array of complex and expensive experimental procedures, highlighting the need for more efficient methods.", + "bbox": [ + 109, + 656, + 885, + 748 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Computational approaches, such as machine learning, are emerging as powerful tools to address these challenges. Leveraging predictive models trained on curated datasets allows researchers to prioritize promising candidates early in the development process, reducing reliance on costly experimental assays [5]. Publicly available databases of molecular properties and biological activity are crucial for training and validating these models. 
In this area, a major development was the curation of the Therapeutics Data Commons (TDC) [6, 7, 8], which contains datasets and benchmarks for many different tasks throughout the therapeutic development pipeline, ranging from early-stage target identification to late-stage clinical trial approval.", + "bbox": [ + 109, + 753, + 887, + 861 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Recent advancements in large language models (LLMs) offer a compelling opportunity to leverage available datasets and address limitations in the therapeutic development process. LLMs have demonstrated the capacity to integrate and learn from diverse data sources across various domains, including scientific applications [9, 10,", + "bbox": [ + 109, + 864, + 887, + 912 + ], + "page_idx": 0 + }, + { + "type": "aside_text", + "text": "arXiv:2504.06196v1 [cs.AI] 8 Apr 2025", + "bbox": [ + 22, + 276, + 58, + 700 + ], + "page_idx": 0 + }, + { + "type": "page_footnote", + "text": "* Equal contributions.", + "bbox": [ + 109, + 934, + 251, + 946 + ], + "page_idx": 0 + }, + { + "type": "page_footnote", + "text": "† Corresponding authors: {shekazizi, ericzwang}@google.com", + "bbox": [ + 109, + 946, + 482, + 960 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/0203c8f019b173fb8d62c4ee904f1f418162cc6b09d58945c721893ac5ac6ad4.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 114, + 85, + 883, + 369 + ], + "page_idx": 1 + }, + { + "type": "image", + "img_path": "images/6c5ae632ad262260986f1fe3efcfbe3a474c92834958a1bd7c97690782bbf10d.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 116, + 385, + 480, + 512 + ], + "page_idx": 1 + }, + { + "type": "image", + "img_path": "images/778634aa5c8b1460d1e1005fbb3e9371ca11ba143cab37feb55f39531fd8284d.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 116, + 537, + 480, + 667 + ], + "page_idx": 1 + }, + { + "type": "image", + "img_path": 
"images/0114d9a00ab47d79177889b7773fe64b8b18a5fc2fdf408061a2db63ed40fe38.jpg", + "image_caption": [ + "Figure 1 | Overview of TxGemma. (top) All TxGemma variants are trained on diverse data sources of the Therapeutic Data Commons (TDC). TxGemma-Predict comes in three size variants (2B, 9B, and 27B) and is trained for high-performance predictions on a broad set of therapeutic development tasks. TxGemma-Chat features two variants (9B and 27B) and is trained on a combination of TDC data with general Gemma-2 instruction tuning data to retain conversational and reasoning capabilities. Agentic-Tx, a therapeutics-focused agentic system powered by Gemini 2.5, has access to 18 tools including TxGemma-Predict and TxGemma-Chat to collect external knowledge and manages complex tasks in either autonomous or interactive settings. (bottom-right) Absolute performance of Agentic-Tx compared to best-in-class models on three complex therapeutic-related reasoning benchmarks. The state-of-the-art (SOTA) values are obtained from [1, 2] and details are listed in Table 3. Dashed lines: L=lowest, M=mean, H=highest human scores. (bottom-left) Relative performance changes of TxGemma-Predict compared to the SOTA generalist model for each task type. The assignment of the 66 evaluated TDC tasks to task types is shown in Tables S.2 and S.3. The bottom bar chart shows a summary of results where TxGemma-Predict outperforms or nearly matches SOTA (light blue), and outperforms SOTA (darker blue)." 
+ ], + "image_footnote": [], + "bbox": [ + 117, + 675, + 475, + 724 + ], + "page_idx": 1 + }, + { + "type": "image", + "img_path": "images/edd756cd1d85fd738f6f39022b8b79d554ff5a528cce9f961383a0bd3ccf1307.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 516, + 387, + 879, + 474 + ], + "page_idx": 1 + }, + { + "type": "image", + "img_path": "images/45bd5bb59ecbb59ebf3c3bd02e1a6e2b8cdf58c01e28987f97b63a2162234f73.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 517, + 479, + 879, + 558 + ], + "page_idx": 1 + }, + { + "type": "image", + "img_path": "images/fab9bc74db450dc8de75df386d1ccaa290832005e9a1245e9ad9844556fd03a3.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 517, + 561, + 880, + 642 + ], + "page_idx": 1 + }, + { + "type": "image", + "img_path": "images/6c7190df8fb3e455a4678103a2125ca56783f3046dad39ad10081da4aeb70ed1.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 517, + 646, + 883, + 724 + ], + "page_idx": 1 + }, + { + "type": "page_number", + "text": "2", + "bbox": [ + 870, + 936, + 883, + 948 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "11]. Their potential to connect disparate aspects of drug development, such as chemical structure, biological activity, and clinical trial outcomes, is particularly exciting. In this context, we have previously introduced Tx-LLM, a LLM fine-tuned from a collection of question-answer instruction-tuning datasets based on TDC [12]. While promising, the model's lack of conversational capabilities prevented reasoning or user interaction, limiting its value for scientists who require a model that can understand complex queries and engage in nuanced discussions.", + "bbox": [ + 109, + 89, + 885, + 180 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "In this work, we introduce TxGemma, a suite of efficient, generalist LLMs trained for therapeutics. 
Building on, but significantly extending, our previous work [12], TxGemma leverages LLMs to synthesize information from diverse sources. The suite includes 2B, 9B, and 27B parameter models, fine-tuned from Gemma-2 [13, 14] using a collection of therapeutic instruction-tuning datasets encompassing small molecules, proteins, nucleic acids, diseases, and cell lines. For the first time in therapeutic AI, TxGemma features conversational counterparts capable of reasoning and explanation, moving beyond black-box predictions to facilitate mechanistic understanding and scientific discussions. Our key contributions are as follows:", + "bbox": [ + 109, + 186, + 885, + 294 + ], + "page_idx": 2 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- Efficient Generalist Therapeutic LLMs: TxGemma represents a potential shift from task-specific AI to efficient generalist models in therapeutic development. These efficient LLMs (2B-27B parameters) offer a competitive alternative to specialized models, achieving strong performance across a broad range of predictive and generative tasks. Out of 66 therapeutic development tasks curated by TDC, TxGemma-Predict outperforms or nearly matches the state-of-the-art generalist model on 64 (outperforms on 45) and state-of-the-art specialist models on 50 (outperforms on 26). Additionally, fine-tuning TxGemma models on clinical trial adverse event prediction requires less data to achieve strong performance compared to base Gemma-2 models, an important advantage for data-limited fields.", + "- Explainable and Interactive Therapeutic Models: TxGemma-Chat introduces reasoning and explanation capabilities, bridging the gap between general LLMs and specialized property predictors. 
Scientists can interact with TxGemma-Chat using natural language, exploring complex questions, receive explanations for predictions (e.g., based on molecular structure), and engage in scientific discussions.", + "- Agentic Orchestration of Therapeutic Development Workflows: We further introduce Agentic-Tx, a therapeutics-focused agentic system powered by Gemini 2.5, demonstrating how TxGemma models can be integrated as tools. Equipped with 18 tools, Agentic-Tx solves complex, multi-step problems, achieving state-of-the-art results on reasoning-intensive chemistry and biology benchmarks, including Humanity's Last Exam [15] and ChemBench [1].", + "- Enabling Innovative Research with Open Models: Understanding the prevalence of proprietary data in therapeutic research, we release TxGemma models trained only on datasets with commercial licenses as open models to empower researchers to adapt and refine them on their own data. This facilitates validation and potential performance improvements tailored to their specific research needs, paving the way for therapy safety and efficacy in more challenging real-world therapeutic applications." + ], + "bbox": [ + 125, + 306, + 883, + 664 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "2 Methods", + "text_level": 1, + "bbox": [ + 109, + 681, + 223, + 696 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "2.1 Data", + "text_level": 1, + "bbox": [ + 109, + 714, + 192, + 728 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Therapeutic Data Commons (TDC) We leverage the Therapeutic Data Commons (TDC) [7, 6], a comprehensive collection of 66 AI-ready datasets spanning the drug discovery and development pipeline. TDC includes over 15 million datapoints across various biomedical entities and encompasses single-instance prediction, multi-instance prediction, and generation tasks [7]. 
We focus on TDC tasks relevant to drug discovery, incorporating diverse therapeutic representations: SMILES strings (small molecules), amino acid sequences (proteins and peptides, including specialized representations for MHC molecules and T-cell receptors), nucleotide sequences (nucleic acids), and natural language text (disease/cell line names) (see Table S.6 for examples). Many tasks combine multiple representations. (See Table S.1 for task inclusion criteria and Tables S.7 and S.8 for biological contexts of certain tasks.)", + "bbox": [ + 109, + 739, + 885, + 878 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Therapeutic Instruction-Tuning Following Chaves et al. [12], we transform the raw TDC data into an instruction-tuning format suitable for LLMs. Each data point is formatted as a prompt:", + "bbox": [ + 109, + 882, + 883, + 914 + ], + "page_idx": 2 + }, + { + "type": "page_number", + "text": "3", + "bbox": [ + 870, + 936, + 883, + 948 + ], + "page_idx": 2 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- Instruction: Briefly describes the task.", + "- Context: Provides 2-3 sentences of relevant biochemical background, derived from TDC descriptions and literature.", + "- Question: Queries a specific therapeutic property, incorporating textual representations of therapeutics and/or targets (e.g., \"Does the following molecule cross the blood-brain barrier? \").", + "- Answer: Formatted as (A)/(B) for binary classification, a binned continuous value for regression, or a SMILES string for generation." + ], + "bbox": [ + 135, + 90, + 883, + 194 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "This process yields 7,080,338 training, 956,575 validation, and 1,917,297 test data points (Figure S.1, Tables S.2 and S.3). Data splits closely follow TDC's recommended methodologies (random, scaffold, cold-start, combination, temporal) (Table S.2, Table S.3). 
Detailed task descriptions are in Tables S.4 and S.5.", + "bbox": [ + 109, + 210, + 885, + 257 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "We employ a few-shot prompting strategy to promote in-context learning [16], using a blend of $70\\%$ zero-shot and $30\\%$ few-shot prompts [17, 12]. For few-shot prompts, we randomly sample examples from the training set (Table S.9), as intra-training set similarity is higher than training-test set similarity (Figure S.2). The number of examples is uniformly selected between 1 and 10 so that few-shot prompting is robust to the number of examples during evaluation.", + "bbox": [ + 109, + 262, + 885, + 339 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "2.2 Modeling", + "text_level": 1, + "bbox": [ + 109, + 351, + 233, + 368 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Base LLM. TxGemma is built upon the Gemma-2 [14] family of lightweight, state-of-the-art open LLMs. Gemma-2 models utilize a decoder-only transformer architecture, incorporating architectural modifications such as interleaved local-global attention and group-query attention, and are trained using Gemini technology [18]. We utilize Gemma-2 base models at 2B, 9B, and 27B parameters. 2B and 9B Gemma-2 models were initially trained via knowledge distillation [14].", + "bbox": [ + 109, + 377, + 885, + 455 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Predictive Model Fine-Tuning. We fine-tune the 2B, 9B, and 27B Gemma-2 base models on the therapeutic instruction-tuning data derived from TDC, creating TxGemma-2B-Predict, TxGemma-9B-Predict, and TxGemma-27B-Predict, respectively. Training was performed across all TDC tasks, with mixture ratios proportional to the number of training data points (see Tables S.2 and S.3 for data distribution). This encompassed all approximately 7 million training examples, comprising 3.3 million from regression/generation and 3.7 million from binary classification tasks. 
Fine-tuning proceeded for 67B tokens (12 epochs) using 256 TPUv4 chips with 8-way data replication, 4-way sequence sharding, and 4-way model sharding. In this work, \"TxGemma\" generally refers to the generalist, predictive TxGemma-27B-Predict.", + "bbox": [ + 109, + 458, + 885, + 580 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Conversational Model Fine-Tuning. We also trained conversational counterparts, TxGemma-9B-Chat and TxGemma-27B-Chat, by supplementing the therapeutic instruction-tuning data with general instruction-tuning data, as detailed in the Gemma-2 report [14]. The training data mixture comprised $30\\%$ therapeutic data and $70\\%$ general instruction-tuning data. Conversational models were trained using the same number of tokens and TPU configuration as the predictive models.", + "bbox": [ + 109, + 585, + 885, + 662 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "2.3 Evaluating Predictive Performance", + "text_level": 1, + "bbox": [ + 109, + 674, + 460, + 691 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Prompting strategy For test set evaluations, we use 10-shot prompting, selecting exemplars from the nearest neighbors within the combined training and validation set (not the test set), as detailed in Table S.9. Nearest neighbors were determined using different methods based on molecule type. For small molecules, we used RDKit [19] to generate Morgan fingerprints (radius 2 and size 2048), representing molecular substructures as binary vectors. Subsequently, we used Chemfp [20] to compute Tanimoto similarities, which quantify fingerprint overlap. 
For amino acid and nucleotide sequences, nearest neighbors were defined by percent sequence identity, determined through multiple sequence alignments performed with Clustal Omega [21].", + "bbox": [ + 109, + 700, + 885, + 808 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Performance Metrics and Statistical Tests We assess performance using the preferred metrics for each task, as defined by TDC [7] and used by previous models. Binary classification tasks are assessed with area under the receiver operating characteristic curve (AUROC), area under the precision-recall curve (AUPRC), and accuracy. Regression tasks use Spearman's and Pearson correlation coefficients, mean absolute error (MAE), and mean squared error (MSE). The USPTO generation task uses \"set accuracy,\" scoring 1 for perfect overlap between predicted and true reactant sets, and 0 otherwise. Bootstrapped metrics are calculated", + "bbox": [ + 109, + 813, + 885, + 905 + ], + "page_idx": 3 + }, + { + "type": "page_number", + "text": "4", + "bbox": [ + 870, + 936, + 883, + 948 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Prompt: Imagine an early virtual screening campaign setting. 
Which of the following two candidates would you prefer for further development?", + "bbox": [ + 155, + 99, + 467, + 133 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "", + "bbox": [ + 156, + 145, + 264, + 155 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "", + "bbox": [ + 156, + 161, + 266, + 170 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Agent $\\rightarrow$ TxGemma-ClinTox: Is the following toxic?", + "bbox": [ + 535, + 99, + 792, + 111 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "", + "bbox": [ + 537, + 112, + 645, + 121 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "TxGemma ClinTox: Toxic", + "bbox": [ + 537, + 123, + 665, + 133 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Agent $\\rightarrow$ TxGemma-ClinTox: Is the following toxic?", + "bbox": [ + 535, + 143, + 795, + 154 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "", + "bbox": [ + 537, + 155, + 645, + 165 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "TxGemma ClinTox: Non-toxic", + "bbox": [ + 537, + 167, + 689, + 176 + ], + "page_idx": 4 + }, + { + "type": "image", + "img_path": "images/a3c869088761c5ea30145738fb0e7de8fb845000d423b65d65ef9ab5098f21ca.jpg", + "image_caption": [ + "Figure 2 | Example workflow of agentic planning and execution with Agentic-Tx. Agentic-Tx uses the ReAct framework [22] to interleave thought with tool-usage. When a user poses a query, Agentic-Tx checks whether the query structure matches any defined tool trigger. If so, the query is routed to the corresponding tool, which (i) parses the request, (ii) invokes specialized logic, and (iii) returns a structured answer to the agent. The agent then composes a user-facing response. This adaptive tool-use mechanism is especially helpful for tasks that require external references, chemical data transformations, or precise chemical information, areas where self-contained LLMs often hallucinate. 
In the displayed example, Agentic-Tx uses two tools to solve a complex therapeutic task: TxGemma-Chat and the clinical toxicity prediction tool based on TxGemma-Predict." + ], + "image_footnote": [], + "bbox": [ + 163, + 189, + 833, + 349 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Agent $\\rightarrow$ TxGemma-Chat: Given two drug candidates, what factors would influence your decision to prioritize one over the other in an early virtual screening campaign?", + "bbox": [ + 155, + 361, + 467, + 392 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "TxGemma-Chat: Investigate whether the drugs would pass through a clinical trial, based on properties such as toxicity.", + "bbox": [ + 155, + 397, + 460, + 419 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Agent: Final answer: ", + "bbox": [ + 612, + 383, + 828, + 395 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Natural Language Input", + "bbox": [ + 148, + 443, + 305, + 457 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Reason + Action", + "bbox": [ + 354, + 443, + 462, + 455 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Tool-use", + "bbox": [ + 563, + 443, + 625, + 454 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Final Answer", + "bbox": [ + 727, + 443, + 813, + 454 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "using 1000 samples. 
To compare overall performance between two models across all TDC tasks, we use the non-parametric Wilcoxon signed-rank test and report the corresponding p-value (details in Appendix C.1).", + "bbox": [ + 109, + 637, + 883, + 669 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "2.4 Agentic System", + "text_level": 1, + "bbox": [ + 109, + 680, + 289, + 696 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "One limitation of LLMs for discovery is that, while their prediction capabilities are powerful, they do not have access to up-to-date external knowledge, such as research articles or domain-specific prediction models. These knowledge cut-offs prevent the model from answering questions outside of its training scope. Additionally, some questions involve multiple reasoning steps to solve, for example, the question \"What structural modifications could improve the potency of the given drug?\" requires iteratively searching the drug's structural space and then prompting TxGemma to predict potency.", + "bbox": [ + 109, + 705, + 885, + 797 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Agentic-Tx, our therapeutics-focused agentic system powered by Gemini 2.5 [18], extends TxGemma's capabilities by orchestrating such complex workflows. Agentic-Tx employs a modular, tool-usage paradigm, in contrast to TxGemma's direct generation of solutions.", + "bbox": [ + 109, + 803, + 887, + 849 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Reasoning and Action Framework Agentic-Tx utilizes the ReAct framework [22], allowing it to interleave reasoning steps (\"thoughts\") with actions (tool use). The agentic system receives a task or question and iteratively takes actions based on its current context. 
Each action typically involves using a tool, which", + "bbox": [ + 109, + 854, + 885, + 900 + ], + "page_idx": 4 + }, + { + "type": "page_number", + "text": "5", + "bbox": [ + 870, + 936, + 883, + 948 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "returns an observation. Key to ReAct is this iterative process of observing, reasoning, and acting, allowing Agentic-Tx to dynamically adjust its approach based on the information it gathers. Because tools may return large outputs, we summarize these observations in order to maintain a concise and relevant context. This iterative process of observing, reasoning, acting, and updating its context allows Agentic-Tx to dynamically adjust its approach and gather the necessary information to answer the initial query. Finally, Agentic-Tx integrates the gathered information and formulates a user-friendly response.", + "bbox": [ + 109, + 90, + 885, + 181 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Agentic Tools Agentic-Tx is equipped with 18 tools across four categories (detailed tool descriptions are in Table S.12). They can be broadly categorized as:", + "bbox": [ + 109, + 186, + 885, + 217 + ], + "page_idx": 5 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "1. TxGemma-based Tools: These provide access to TxGemma's capabilities. The Chat tool enables interaction with TxGemma-27B-Chat. The ClinicalTox and ToxCast tools utilize TxGemma-27B-Predict for toxicity predictions. $IC_{50}$ returns the predicted normalized $IC_{50}$ between a drug and protein, the Mutagenicity tool predicts drug mutagenicity, and the Phase1 Trial tool predicts whether a drug would pass a Phase 1 clinical trial.", + "2. General Tools: These query external knowledge resources, including PubMed, Wikipedia, and the web.", + "3. Molecule Tools: These leverage domain-specific libraries for tasks such as retrieving molecular descriptors (e.g., from PubChem) and performing chemical structure conversions.", + "4. 
Gene & Protein Tools: These leverage domain-specific libraries for tasks involving genes or proteins, such as retrieving gene descriptions and protein descriptions (e.g., from the NCBI Gene database)." + ], + "bbox": [ + 129, + 232, + 883, + 398 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "3 Results", + "text_level": 1, + "bbox": [ + 109, + 419, + 210, + 434 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "3.1 TxGemma Predictive Performance", + "text_level": 1, + "bbox": [ + 109, + 452, + 459, + 467 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "3.1.1 Comparison with best-in-class therapeutic models", + "text_level": 1, + "bbox": [ + 109, + 479, + 571, + 494 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "To provide a comprehensive evaluation of our models' predictive capabilities, we benchmark against both specialist and generalist baselines. For specialist comparisons, we define best-in-class performance metrics for each task using previous models. Specifically, we utilize TDC leaderboard scores for tasks where available (ADMET, DrugCombo, DTI DG). For remaining tasks, values are reported from a literature review and are detailed in Tables S.13 and S.14. These specialist performance values align with those reported in Chaves et al. [12]. Additionally, we compare our models against three prominent therapeutic generalist and multi-task models: Tx-LLM [12], LlaSMol [23], and MolE [24]. Tx-LLM, with its two size-variants S and M, shares similar training data to our approach enabling a direct comparison across all tasks. LlaSMol a suite of generalist models built upon fine-tuned open-source LLMs trained for small-molecule applications [23]. Similarly, MolE was developed as a graph-based multi-task foundation model for small molecules. 
LlaSMol and MolE, specialized for small molecules, offer strong baselines for small molecule tasks.", + "bbox": [ + 109, + 506, + 887, + 674 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "TxGemma shows improved performance compared to therapeutic generalist models In Figure 3, we compare the performance of TxGemma-27B-Predict to the two existing models in the Tx-LLM [12] family, Tx-LLM M and Tx-LLM S, built over PaLM-2 on TDC tasks. TxGemma-27B-Predict surpasses Tx-LLM M on 45 out of 66 tasks, while underperforming on 21. In addition, it outperforms Tx-LLM S on 62 and underperforms Tx-LLM S on only 4. Aggregating performance over task, we observe a statistically significant improvement of TxGemma-27B-Predict over Tx-LLM models $(p = 0.003$ , Wilcoxon signed-rank test). These results demonstrate that TxGemma provides a highly competitive alternative to its predecessor with improved functionality at a substantially reduced model size.", + "bbox": [ + 109, + 678, + 887, + 801 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "TxGemma is competitive with specialist therapeutic models Figure 4 and Figure S.4 compare TxGemma's performance with best-in-class specialist model across tasks containing various combinations of SMILES, amino acid, nucleotide, and text inputs. In a comparison with specialist best-in-class models, TxGemma-27B-Predict outperforms the state-of-the-art (SOTA) on 26 and performs near SOTA on 50. This is a substantial improvement over its predecessor Tx-LLM M, which outperformed SOTA on 22 tasks and near SOTA on 43. 
These results demonstrate the improved capabilities of TxGemma-27B-Predict and its competitiveness with current specialist models designed for specific tasks and therapeutic feature types.", + "bbox": [ + 109, + 806, + 887, + 912 + ], + "page_idx": 5 + }, + { + "type": "page_number", + "text": "6", + "bbox": [ + 870, + 936, + 883, + 948 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/e02f5ed44e79ec81692d41c9c6fe4b312e200e4239d7e952545b07549038790e.jpg", + "image_caption": [ + "Figure 3 | Comparison of TxGemma-Predict's performance with therapeutic generalist models. (top) relative performance improvement of TxGemma-27B-Predict in comparison to Tx-LLM S. TxGemma-27B-Predict outperforms Tx-LLM S on 62 and underperforms on only 4. (bottom) relative performance improvement of TxGemma-27B-Predict in comparison to Tx-LLM M. TxGemma-27B-Predict outperforms Tx-LLM M on 45 out of 66 tasks, while underperforming on 21. When aggregating performance over task, we observe a net improvement of TxGemma-27B-Predict over Tx-LLM models, with a statistically significant difference $(p = 0.003$ , Wilcoxon signed-rank test). These results establish TxGemma-27B-Predict as a competitive and functionally enhanced alternative at practical model sizes. Values for each task can be found in Tables S.15 and S.16." + ], + "image_footnote": [], + "bbox": [ + 114, + 87, + 883, + 498 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "TxGemma performs similarly to multi-task models specialized for small molecules Table 1 and Figure S.6 compare the predictive performance of TxGemma-27B-Predict with MolE, a graph-based multi-task foundation model for small molecules. MolE performs within the $95\\%$ CIs of TxGemma-27B-Predict for 15 out of 22 tasks. 
Furthermore, both TxGemma-27B-Predict and TxGemma-9B-Predict outperform LlaSMolMistral (7B), the top performing model from the LlaSMol suite, on 2 of 5 shared tasks and within $95\\%$ CIs on 2 additional tasks (Table 2 and Figure S.5). All metrics for MolE and LlaSMol are reported from Mendez-Lucio et al. [24] and Yu et al. [23]. Given their specialization in small-molecule tasks, LlaSMol and MolE provide strong baselines for evaluating generalist models. Notably, TxGemma, a generalist model encompassing diverse drug types and many different tasks, achieves competitive performance with these dedicated models designed for a narrower range of small-molecule tasks.", + "bbox": [ + 109, + 676, + 888, + 833 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "3.2 TxGemma Conversational Capabilities", + "text_level": 1, + "bbox": [ + 109, + 840, + 493, + 859 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "While TxGemma-27B-Predict performs well on prediction tasks, training solely on instruction tuning data for therapeutic properties limits its conversational capacity. 
TxGemma-27B-Predict can engage in general", + "bbox": [ + 109, + 868, + 885, + 900 + ], + "page_idx": 6 + }, + { + "type": "page_number", + "text": "7", + "bbox": [ + 870, + 936, + 883, + 948 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/ebfa122c4e8e4aaa8d9e1067f57d4762781618f9e07826cee01379d3677e3b2b.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 158, + 88, + 380, + 258 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/9b152fe3c52d378f2d027185f60d5bd699d4d295bd539075317ae94dfc9ac7a1.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 388, + 88, + 612, + 258 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/2c3dedc41b7c9e61f949673cd15587fced5ab7c9ed1cd38b27f8cc2f630441e6.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 616, + 89, + 841, + 258 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/1996f43f70147e885c9ca69f311159a6643604936abffe7afc9aded5bd4946cc.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 156, + 263, + 380, + 433 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/3cd0a375ee3d7de3a86068d0c489eab4f7df74f4ad4e1134db7d492a2c938c19.jpg", + "image_caption": [], + "image_footnote": [ + "SMILES" + ], + "bbox": [ + 388, + 263, + 612, + 431 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/5019a41ee3d7b9b09cfa6b183c6cca835c79d3fc3f8b7e470d8b3d39c395d574.jpg", + "image_caption": [], + "image_footnote": [ + "SMILES + Text" + ], + "bbox": [ + 616, + 263, + 841, + 433 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/16baf39adc18c0e4e4aa59b45fb1b03adc1282067ff3e5c3d12ac88c14a15786.jpg", + "image_caption": [ + "Figure 4 | Comparison of TxGemma's performance with best-in-class specialist models. TxGemma-27B-Predict is evaluated on each task in TDC and compared to the corresponding best-in-class competitor. 
The panels depict different metrics used to evaluate the tasks. Tasks are colored by their feature types including one or a combination of SMILE, Amino acid, Nucleotide and text as indicated in the legend. Marker sizes illustrate the number of data points in the task on a log scale. The larger shaded area in blue indicates where TxGemma outperforms best-in-class models, while the narrower light blue shaded area indicates where TxGemma is performing near best-in-class model (defined as within $10\\%$ ). MAE and MSE values are log-transformed since the magnitudes of these values depend on the units of outputs. Generation accuracy is the fraction of correct SMILES strings in the USPTO generation task. Values for each task can also be found in Tables S.13 and S.14." + ], + "image_footnote": [], + "bbox": [ + 150, + 439, + 377, + 608 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/8b46427efa8d29aead6de741c144673fa93c545121e1a5c5fd8f01be42910034.jpg", + "image_caption": [], + "image_footnote": [ + "Amino acid", + "Nucleotide + Amino acid", + "Amino acid + Text", + "Amino acid + SMILES", + "Nucleotide" + ], + "bbox": [ + 379, + 439, + 606, + 608 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "conversation, but its performance deteriorates when prompts deviate from the expected format. Figure S.9 shows an example of such decline in TxGemma-27B-Predict's conversational capabilities. To expand the TxGemma family's capabilities and provide a more versatile tool with the ability to explain its reasoning, we trained TxGemma-Chat with a mix of therapeutic and general instruction-tuning data as detailed in Section 2.2. We evaluate these new conversational capabilities through a combination of standard LLM benchmarks and qualitative examples. 
We also run our models through assurance evaluations, as done for Gemma-3 [25], to verify that TxGemma models adhere to safety policies.", + "bbox": [ + 109, + 796, + 887, + 905 + ], + "page_idx": 7 + }, + { + "type": "page_number", + "text": "8", + "bbox": [ + 870, + 936, + 883, + 948 + ], + "page_idx": 7 + }, + { + "type": "table", + "img_path": "images/97948ef7ade23a9c58cba5b5f186c03c35fd709d4e7765967ffad5cb11de2ff1.jpg", + "table_caption": [ + "Table 1 | Comparative performance of TxGemma and MolE on small molecule tasks. Details of the predictive performance of TxGemma-27B-Predict and MolE, a graph-based molecular multi-task foundation model, across various pharmacokinetics and toxicity tasks. Bold values indicate the best performance for each task. Metrics for MolE are reported from Mendez-Lucio et al. [24]. TxGemma-27B-Predict values are bootstrapped averages and $95\\%$ CIs. These pharmacokinetics and toxicity tasks are publicly available in TDC [7]." + ], + "table_footnote": [], + "table_body": "
Task TypeTaskMetricMolE [24]TxGemma-27B-Predict
PharmacokineticsCaco2 WangMAE (↓)0.3290.401 (0.358-0.449)
Lipophilicity AstraZenecaMAE (↓)0.4060.538 (0.507-0.570)
Solubility AqSolDBMAE (↓)0.7760.907 (0.870-0.948)
PPBR AZMAE (↓)7.2299.048 (8.141-10.111)
HIA HouAUROC (↑)0.9840.988 (0.972-0.999)
Pgp BroccatelliAUROC (↑)0.9300.937 (0.904-0.964)
Bioavailability MaAUROC (↑)0.6400.694 (0.575-0.801)
BBB MartinsAUROC (↑)0.9030.908 (0.872-0.938)
CYP3A4 Substrate CarbonMangelsAUROC (↑)0.6920.691 (0.601-0.784)
CYP2D6 VeithAUPRC (↑)0.6790.683 (0.639-0.726)
CYP3A4 VeithAUPRC (↑)0.8760.854 (0.836-0.872)
CYP2C9 VeithAUPRC (↑)0.7820.798 (0.767-0.826)
CYP2D6 Substrate CarbonMangelsAUPRC (↑)0.6920.711 (0.570-0.830)
CYP2C9 Substrate CarbonMangelsAUPRC (↑)0.4090.438 (0.302-0.576)
VDss LombardoSpearman (↑)0.6440.559 (0.457-0.655)
Half Life ObachSpearman (↑)0.5780.458 (0.306-0.594)
Clearance Microsome AZSpearman (↑)0.6320.462 (0.353-0.565)
Clearance Hepatocyte AZSpearman (↑)0.4560.260 (0.129-0.384)
ToxicityLD50 ZhuMAE (↓)0.6020.627 (0.597-0.660)
hERGAUROC (↑)0.8350.885 (0.813-0.946)
AMESAUROC (↑)0.8340.816 (0.795-0.838)
DILIAUROC (↑)0.8520.886 (0.810-0.947)
", + "bbox": [ + 119, + 179, + 883, + 569 + ], + "page_idx": 8 + }, + { + "type": "table", + "img_path": "images/d2323cf4b228f6cb908f1814d922975c4028f6cd3bba08c079fa90cf5aa14728.jpg", + "table_caption": [ + "Table 2 | Comparative performance of TxGemma and LlaSMol on small molecule tasks. Comparison of TxGemma-27B-Predict with LlaSMolMistral (best LlaSMol model at 7B) across shared small-molecule tasks. Bold values indicate the best performance for each task. Metrics for LlaSMolMistral are reported from Yu et al. [23]. TxGemma-Predict values are bootstrapped averages and $95\\%$ CIs. These pharmacokinetics, toxicity, and high-throughput screening data and tasks are publicly available in TDC [7]" + ], + "table_footnote": [ + "* To predict whether compounds have anti-HIV properties.", + "† Task name is modified to match the nomenclature from Yu et al. [23]." + ], + "table_body": "
Task TypeTaskMetricLlaSMolMistral [23]TxGemma-27B-PredictTxGemma-9B-Predict
PharmacokineticsBBBP†Accuracy (↑)0.7460.869 (0.835-0.901)0.847 (0.813-0.881)
ESOL†RMSE (↓)1.1501.250 (1.185-1.321)1.360 (1.246-1.480)
Lipo†RMSE (↓)1.0100.710 (0.668-0.752)0.742 (0.700-0.787)
ToxicityClintoxAccuracy (↑)0.9310.926 (0.896-0.956)0.925 (0.892-0.953)
High-throughput screeningHIV*Accuracy (↑)0.9670.968 (0.964-0.972)0.965 (0.961-0.969)
", + "bbox": [ + 112, + 672, + 890, + 797 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "TxGemma-Chat bridges the gap between property predictors and general language models To assess the performance of TxGemma-Chat as a general conversational LLM, we evaluated it on the Massive Multitask Language Understanding (MMLU) [26] benchmark, a comprehensive suite of 57 diverse tasks spanning mathematics, history, computer science, law, etc. This benchmark evaluates knowledge, reasoning,", + "bbox": [ + 109, + 852, + 888, + 914 + ], + "page_idx": 8 + }, + { + "type": "page_number", + "text": "9", + "bbox": [ + 870, + 936, + 883, + 948 + ], + "page_idx": 8 + }, + { + "type": "image", + "img_path": "images/cff609f1cff46876c76c043fcbdff25eab187a12d98a2bd260d1ab40fc7d3e3e.jpg", + "image_caption": [ + "Figure 5 | TxGemma-Chat bridges the gap between property predictors and general LLMs. Each point represents a therapeutic task in the TDC. The figure depicts relative predictive performance changes of TxGemma-Chat in comparison to TxGemma-Predict (top) and Gemma-2 (bottom) for 9B variants left and 27B variants in right. As expected, TxGemma-27B-Predict outperforms TxGemma-27B-Chat on therapeutic tasks, with TxGemma-27B-Chat showing a $10.69\\%$ median relative performance reduction. However, TxGemma-27B-Chat exceeds the Gemma-2-27B baseline by $29.67\\%$ on TDC therapeutic tasks. Similarly, TxGemma-9B-Chat's performance is $10.32\\%$ lower than TxGemma-9B-Predict's. Values for each task can be found in Tables S.15 and S.16." 
+ ], + "image_footnote": [], + "bbox": [ + 119, + 89, + 486, + 250 + ], + "page_idx": 9 + }, + { + "type": "image", + "img_path": "images/29542d68bc684f7ee0c7db74899e9f4990c401e026299fb8377fbbe2aa9ea653.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 509, + 90, + 883, + 250 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "and problem-solving abilities across a wide range of academic subjects, providing a measure of overall language understanding. It comprises 14,079 multiple-choice questions, each with four possible answers. For this multiple-choice format, we took the model's prediction as the option with the highest log-likelihood in a zero-shot setting and report overall accuracy as well as per-subject accuracy.", + "bbox": [ + 109, + 407, + 883, + 469 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "Figure S.7 compares the performance of TxGemma-27B-Chat, TxGemma-27B-Predict, and Gemma-2-27B on MMLU, a standard benchmark for evaluating general LLMs. TxGemma-27B-Chat achieves an accuracy of $73.87\\%$ , slightly lower than Gemma-2-27B's $75.38\\%$ , but TxGemma-27B-Chat shows slight improvements in areas such as medical genetics, high school statistics, and college chemistry. Furthermore, TxGemma-27B-Chat significantly outperforms TxGemma-27B-Predict, which has an accuracy of $53.60\\%$ . This suggests that while fine-tuning solely on therapeutic data can diminish general knowledge acquired during pre-training, incorporating general instruction-tuning data can mitigate this effect.", + "bbox": [ + 109, + 473, + 888, + 582 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "Furthermore, we assess TxGemma-27B-Chat on all therapeutic tasks within TDC. Figure 5 compares the relative performance changes of TxGemma-27B-Chat to TxGemma-27B-Predict and Gemma-2-27B for both 9B and 27B variants across these tasks. 
As anticipated, TxGemma-27B-Predict outperforms TxGemma-27B-Chat on these predictive tasks, with a median relative performance reduction of $11\\%$ observed for TxGemma-27B-Chat. Nevertheless, TxGemma-27B-Chat surpasses the baseline Gemma-2-27B, demonstrating a median relative improvement of $30\\%$ . Similarly, TxGemma-9B-Chat shows a $10\\%$ median relative performance reduction compared to TxGemma-9B-Predict. Regression tasks experience the greatest performance decline from the general-purpose training. These results demonstrate how TxGemma-Chat bridges the gap between therapeutic property predictors and general LLMs, functioning as a unified model for both capabilities.", + "bbox": [ + 109, + 585, + 888, + 724 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "TxGemma-Chat can provide reasoning for complex tasks. A particularly compelling application of conversational models lies in prompting them to explain their predictions to users. While general LLMs may possess some foundational knowledge concerning therapeutic challenges, they are not accurate for property prediction (Figure 5). In Figure 6, we prompt TxGemma-27B-Chat to answer a question regarding blood-brain barrier permeability using the BBB Martins prompt format. TxGemma-27B-Chat provides only the answer in the initial turn, but when given a subsequent prompt to articulate its rationale, the model provides mechanistic reasoning for its answer based on molecular solubility and the structure of the input molecule derived from the SMILES string. All of this reasoning occurred directly within the model weights, without requiring any preprocessing of the SMILES string.", + "bbox": [ + 109, + 728, + 888, + 867 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "Interestingly, prompting structures enable TxGemma-Chat to provide additional reasoning on complex tasks. 
For instance, while the relationship between blood-brain barrier permeability and lipophilicity is intuitive, some", + "bbox": [ + 109, + 869, + 888, + 902 + ], + "page_idx": 9 + }, + { + "type": "page_number", + "text": "10", + "bbox": [ + 862, + 936, + 885, + 949 + ], + "page_idx": 9 + }, + { + "type": "table", + "img_path": "images/327ad9521c9b1211442d7f2d946b5d27f8bc523409af6030fb32861ad17716a2.jpg", + "table_caption": [ + "Table 3 | Performance of Agentic-Tx. Accuracy of Agentic-Tx compared with SOTA models on ChemBench, GPQA, and HLE benchmarks." + ], + "table_footnote": [ + "$(\\dagger)$ Using ReAct framework, $(^{*})$ Extracted from [1], $(^{**})$ Extracted from [2]" + ], + "table_body": "
ModelChemBenchGPQA (Diamond)Humanity's Last Exam
MiniPreferenceChemistryChemistry & Biology
Agentic-Tx (Gemini 2.5-Pro)84.566.281.720.1
Agentic-Tx (Gemini 2.0-Pro)83.465.562.414.5
Agentic-Tx (Gemini 1.5-Pro)80.665.051.811.9
Claude-3.5 (Sonnet)73.0*60.0*†40.4-
GPT-4o72.0*59.0*43.8**3.8
Gemini 2.5-pro82.865.579.517.9
Gemini 2.0-pro79.658.453.311.1
Gemini 1.5-pro74.955.648.210.6
PaperQA2 [28]67.0*56.0*--
o180.0*56.0*64.7**12.3
o3-mini (medium)82.461.362.513.0
o3-mini (high)82.562.064.513.2
Human Expert (Average Performance)27.0---
", + "bbox": [ + 143, + 132, + 857, + 397 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "tasks such as predicting clinical trial approval are more challenging to reason over. If TxGemma-27B-Chat is prompted to provide reasoning in the same manner as in Figure 6 for predicting clinical trial approval, TxGemma-27B-Chat refuses and directs the user to alternative sources. However, when modifying the original prompt, instructing the model to output reasoning steps before the final answer, it bypasses the refusal and restores reasoning capabilities (Figure S.10).", + "bbox": [ + 109, + 441, + 887, + 518 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "3.3 Agentic Planning and Execution based on TxGemma", + "text_level": 1, + "bbox": [ + 109, + 530, + 624, + 546 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "Agentic-Tx demonstrates competitive performance on therapeutic benchmarks. We evaluate the capability of Agentic-Tx to assist with therapeutics tasks by means of questions from three benchmarks: GPQA (Diamond) [27], ChemBench [1], and Humanity's Last Exam (HLE) [15]. Within each benchmark, we use existing selections of therapeutic-relevant questions; for GPQA we evaluate GPQA-Chemistry (47 questions), for ChemBench we evaluate ChemBench-Chemical Preference which aims to select an ideal candidate molecule for therapeutic development (1,001 question) and ChemBench-mini, which evaluates across 8 categories of chemistry from toxicity/safety to organic chemistry (236 questions). Finally, for HLE, we evaluate HLE-Chemistry and HLE-Biology (235 questions). For open-ended questions in HLE, we observed a high variation of metric scores depending on the selection of the LLM-rater model [15]. 
To ensure an objective accuracy measure, we restrict the evaluation to multiple choice questions (MCQs).", + "bbox": [ + 107, + 556, + 887, + 709 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "As shown in Table 3, Agentic-Tx (Gemini 2.5-Pro), Agentic-Tx (Gemini 2.0-Pro), and Agentic-Tx (Gemini 1.5-Pro) achieve competitive or greater accuracy compared to existing SOTA models across several benchmarks. Specifically, Agentic-Tx (Gemini 2.5-Pro) and Agentic-Tx (Gemini 2.0-Pro) surpasses prior SOTA models on the exceptionally difficult Humanity's Last Exam benchmark (Chemistry & Biology tasks), with Agentic-Tx (Gemini 2.5-Pro) achieving $52.3\\%$ relative improvement over o3-mini (high) and $13.4\\%$ over Gemini 2.5-pro, as well as on ChemBench, with relative improvements of $6.3\\%$ (ChemBench-Preference) and $2.4\\%$ (ChemBench-Mini) over o3-mini (high) and $1.1\\%$ (ChemBench-Preference) and $2.0\\%$ (ChemBench-Mini) over Gemini 2.5-pro. On GPQA (Diamond), Agentic-Tx also achieves SOTA accuracy with $26.7\\%$ relative improvements over o3-mini and $2.7\\%$ over Gemini 2.5-pro. All variants of Agentic-Tx outperform their corresponding base Gemini models across all benchmarks, indicating the effectiveness of the Agentic-Tx framework in enhancing LLMs efficacy for advanced reasoning within this domain. This suggests that agentic workflows such as ours represent useful tools for therapeutic development, particularly in areas requiring domain knowledge and the selection of candidate molecules. 
The agent's ability to leverage external tools and perform multi-step", + "bbox": [ + 109, + 713, + 887, + 912 + ], + "page_idx": 10 + }, + { + "type": "page_number", + "text": "11", + "bbox": [ + 862, + 936, + 883, + 949 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "Instructions: Answer the following question about drug properties.", + "bbox": [ + 138, + 95, + 500, + 108 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "Context: As a membrane separating circulating blood and brain extracellular fluid, the blood-brain barrier (BBB) is the protection layer that blocks most foreign drugs. Thus the ability of a drug to penetrate the barrier to deliver to the site of action forms a crucial challenge in development of drugs for central nervous system.", + "bbox": [ + 138, + 109, + 730, + 143 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "Question: Given a drug SMILES string, predict whether it", + "bbox": [ + 138, + 146, + 442, + 157 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "(A) does not cross the BBB (B) crosses the BBB", + "bbox": [ + 138, + 157, + 392, + 167 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "Drug SMILES: C1CN(CCC1(C=O)NC(CCO)C2=CC=C(C=C2)Cl)N)C3=NC=NC4=C3C=CN44", + "bbox": [ + 138, + 172, + 611, + 184 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "TxGemma", + "text_level": 1, + "bbox": [ + 263, + 208, + 326, + 220 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "Answer: (B)", + "bbox": [ + 264, + 224, + 330, + 237 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "Explain your reasoning based on the molecule structure.", + "bbox": [ + 138, + 256, + 439, + 268 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "TxGemma", + "text_level": 1, + "bbox": [ + 263, + 282, + 326, + 296 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "The molecule has several properties that suggest it can cross the blood-brain barrier (BBB):", + "bbox": [ + 264, 
+ 304, + 756, + 316 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "Lipophilicity: The molecule contains multiple aromatic rings and a long alkyl chain, contributing to its lipophilicity. Lipophilicity is a key factor in BBB penetration, as the barrier is primarily composed of lipids.", + "bbox": [ + 264, + 325, + 857, + 349 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "Molecular weight: The molecular weight is within a range (around 400 Da) that is generally considered favorable for BBB permeability.", + "bbox": [ + 264, + 352, + 856, + 375 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "Hydrogen bond donors and acceptors: While there are hydrogen bond donors and acceptors present, they are not excessive, which can sometimes hinder BBB passage.", + "bbox": [ + 264, + 378, + 856, + 401 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "Lack of charged groups: The molecule lacks any charged groups at physiological pH, making it less likely to be repelled by the BBB's tight junctions.", + "bbox": [ + 264, + 402, + 856, + 426 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "Overall, the combination of lipophilicity, moderate molecular weight, balanced hydrogen bonding capacity, and lack of charged groups suggests that this molecule has a good chance of crossing the blood-brain barrier.", + "bbox": [ + 264, + 429, + 856, + 452 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "What makes passing through the blood-brain barrier different from passive diffusion through a membrane?", + "bbox": [ + 137, + 474, + 712, + 486 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "TxGemma", + "text_level": 1, + "bbox": [ + 263, + 502, + 326, + 513 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "While passive diffusion is a major mechanism for molecules to cross the blood-brain barrier (BBB), there are key differences that make BBB passage more challenging and selective:", + "bbox": [ + 264, + 
527, + 856, + 550 + ], + "page_idx": 11 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "1. Tight Junctions: The cells forming the BBB are packed very tightly together with specialized structures called tight junctions. These junctions seal the spaces between cells, significantly reducing the gaps through which molecules can passively diffuse.", + "2. Specialized Transport Systems: The BBB has a variety of specialized transport systems that actively move specific molecules across the barrier. These systems are highly selective and often require molecules to have specific structural features or be recognized by specific receptors.", + "3. Efflux Pumps: The BBB also expresses efflux pumps, which are proteins that actively pump molecules back out of the brain. This mechanism helps to prevent potentially harmful substances from entering the brain and maintain a stable brain environment.", + "4. Metabolic Enzymes: The BBB contains enzymes that can metabolize and inactivate certain molecules, further limiting their entry into the brain.", + "5. Astrocyte Endfeet: Astrocytes, a type of glial cell, have projections called endfeet that surround blood vessels in the brain. These endfeet contribute to the BBB's barrier function and regulate the passage of molecules." + ], + "bbox": [ + 271, + 551, + 856, + 704 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "In summary, while passive diffusion plays a role in BBB permeability, the presence of tight junctions, specialized transport systems, efflux pumps, metabolic enzymes, and astrocyte endfeet make the BBB a highly selective barrier that restricts the entry of many molecules. 
This selectivity is crucial for protecting the brain from harmful substances and maintaining a stable internal environment, but it also poses challenges for drug delivery to the central nervous system.", + "bbox": [ + 264, + 708, + 856, + 763 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "Is my molecule likely to run into these issues?", + "bbox": [ + 143, + 786, + 390, + 797 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "TxGemma", + "text_level": 1, + "bbox": [ + 263, + 813, + 326, + 825 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "Let's analyze how your molecule might be affected by the specific challenges of the BBB ...", + "bbox": [ + 264, + 830, + 750, + 842 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "Figure 6 | TxGemma-Chat can be prompted to explain its reasoning or provide additional context. Following an initial question, TxGemma-27B-Chat is asked to explain its reasoning based on molecule structure in the second turn. The model uses its understanding of chemistry and biology to justify its answer and can continually engage with the user on follow-up questions.", + "bbox": [ + 109, + 862, + 885, + 922 + ], + "page_idx": 11 + }, + { + "type": "page_number", + "text": "12", + "bbox": [ + 862, + 936, + 883, + 948 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "reasoning enables it to address more complex queries beyond the scope of traditional LLMs.", + "bbox": [ + 109, + 90, + 771, + 104 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "Agentic-Tx effectively leverages various tools based on the therapeutic task requirement. In Figure S.14, we investigate tool usage frequency within the Agentic-Tx system across the ChemBench-Preference and Biology and Chemistry (B&C) HLE datasets. Our analysis reveals that Agentic-Tx tool usage distribution varies significantly depending on the task and dataset. 
For the ChemBench-Preference task, which focuses on selecting ideal candidate molecules for therapeutic development, the Agentic-Tx system exhibits a high frequency of usage for tools such as SMILES description and toxicity prediction. This suggests a strong emphasis on molecular characterization and safety assessment in this task correctly invoked by Agentic-Tx. In contrast, on the B&C HLE dataset, tool usage is predominantly concentrated on general knowledge retrieval tools like PubMed or Wikipedia search. This indicates that the Agentic-Tx system relies heavily on accessing and synthesizing broad biological or chemical knowledge to address questions in these domains. In Figure S.15, we investigate the breakdown of tool interactions per question and explore how these interactions contribute to performance variations. Our analysis shows that each question can involve up to 8 tool calls, and the high usage of tools such as SMILES description and toxicity prediction tools correlates with overall performance improvement. These results highlight the Agentic-Tx system's adaptive nature, demonstrating its ability to leverage different tools based on the specific requirements of the task.", + "bbox": [ + 109, + 109, + 887, + 339 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "Agentic-Tx inference time is suitable for real time human interaction Analysis of Agentic-Tx's inference time indicates efficient performance characteristics. The median time observed for tool execution is 0.55 seconds. The fastest tool (Gene Sequence) completes execution in 0.15 seconds, while the slowest (ToxCast) requires 28.2 seconds. This suggests that Agentic-Tx operates within a timeframe conducive to real-time user interaction. The observed latencies demonstrate suitability for integration into workflows where immediate feedback and responsiveness are desired. 
The system's ability to maintain a median inference time below one second contributes to an efficient user experience.", + "bbox": [ + 109, + 349, + 885, + 455 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "3.4 Additional Analysis and Ablations", + "text_level": 1, + "bbox": [ + 109, + 469, + 455, + 484 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "Data contamination analysis and data leakage considerations To assess potential data contamination from the Gemma-2 pretraining data, we calculated the overlap between features in the therapeutic instruction-tuning data and the pretraining corpus. For multi-instance tasks, contamination was defined as the presence of any constituent feature (e.g., drug SMILES or target protein sequence in drug-target binding) in the pretraining data. The majority of tasks showed no direct contamination (Figure S.12). For tasks with some contamination, filtering contaminated datapoints and recalculating TxGemma-27B-Predict performance revealed no significant changes (Figure S.13).", + "bbox": [ + 109, + 496, + 885, + 602 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "While direct contamination was minimal, we further investigated potential indirect contamination. Although SMILES strings are less common in general web text, pretraining on molecular names could have created learned associations between names and SMILES, potentially influencing test set performance. To test this, we compared the similarity of TxGemma-27B-Predict embeddings for PubChem molecules represented as SMILES strings and their corresponding IUPAC names, against the similarity of embeddings for SMILES strings paired with decoy (randomly selected, incorrect) names. The similarities were statistically equivalent (Figure S.12), confirmed by a two one-sided t-test $(p = 3 \\times 10^{-12}$ , $\\delta = 0.02)$ . 
This suggests that TxGemma-27B-Predict did not learn spurious name-SMILES associations during pretraining, likely because names and SMILES were encountered in separate training phases and for different molecules. Therefore, both direct and indirect contamination from pretraining are unlikely to significantly affect our results.", + "bbox": [ + 109, + 607, + 885, + 760 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "Fine-tuning TxGemma models improves data efficiency. Given the scarcity of therapeutic data and the potential of TxGemma to serve as a pretrained model for further adaptation, we investigated TxGemma's data efficiency and generalization to new tasks in out-of-distribution settings. Specifically, we fine-tuned the baseline model Gemma-2-27B as well as our TxGemma-27B-Predict on adverse event prediction data from TrialBench [29]. Serious adverse events are critical in assessing the safety profile of a new treatment and accurate prediction of these events allows for better risk management and resource allocation [29]. To ensure a fair evaluation of generalization, we filtered the TrialBench test set to exclude samples overlapping with phase 1, 2, or 3 of clinical trial outcome prediction data in TDC. In addition, datapoints without available SMILES strings are excluded. This lead to 14,368 train and 3,184 test samples.", + "bbox": [ + 109, + 765, + 885, + 902 + ], + "page_idx": 12 + }, + { + "type": "page_number", + "text": "13", + "bbox": [ + 862, + 936, + 883, + 949 + ], + "page_idx": 12 + }, + { + "type": "image", + "img_path": "images/4dfa44b4877c0dff07f7e32085f8af1e800f4ac8bc4d1d5740a2f5789b0e102d.jpg", + "image_caption": [ + "Figure 7 | TxGemma improves efficiency at adverse event prediction from SMILES strings. The figure shows the AUROC of predicting adverse events in a clinical trial from the drug SMILES strings as a function of the training data fraction for Gemma-2-27B and TxGemma-27B-Predict. 
Clinical trials are separated based on trial phase, and datapoints without available SMILES strings are excluded. To assess model performance with additional textual information, separate models trained on both SMILES strings and additional textual information are indicated by colored dashed lines, and SOTA models are indicated by gray dashed lines. (S) denotes models trained with SMILES strings only, and $(\\mathrm{S} + \\mathrm{T})$ those trained with SMILES and textual information (Table S.10)." + ], + "image_footnote": [ + "Gemma-27B (S) $\\rightarrow$ TxGemma-27B-Predict (S) --- Gemma-27B (S+T) —— TxGemma-27B-Predict (S+T) --- Best-in-class (S+T)" + ], + "bbox": [ + 117, + 89, + 313, + 257 + ], + "page_idx": 13 + }, + { + "type": "image", + "img_path": "images/868848d097319f43df2f2324861cfe2384f376dcae1a95d6563e3f7b212d5ba8.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 318, + 90, + 500, + 257 + ], + "page_idx": 13 + }, + { + "type": "image", + "img_path": "images/6eb51e87119992d386790213cb24f8ca1436c165f652aa4c124e908de08b04eb.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 506, + 90, + 689, + 256 + ], + "page_idx": 13 + }, + { + "type": "image", + "img_path": "images/0ab979193ce5715dcc7b650b9114e63d4a217e3ebcd07ca293810378aa73bd12.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 696, + 90, + 880, + 257 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "We consider two settings. Initially, we focus exclusively on drug SMILES strings as the only feature contributing to clinical trial outcome, thereby isolating the influence of therapeutic information by excluding this additional context. To simulate data limitations, we fine-tuned TxGemma-27B-Predict and the baseline Gemma-2-27B on varying fractions of the training data, and then evaluated the newly fine-tuned models performance on the test set after 30 epochs of training (Figure 7). 
Overall, TxGemma-27B-Predict achieved higher AUROCs with lower amounts of training data, matching the performance of Gemma-2-27B with less than $10\\%$ of retraining data. In the second setting, we explored the performance ceiling by incorporating textual information about the clinical trials, increasing the number of tokens provided to the models by a factor of 4 (Table S.10). This is the setting used by the best-in-class model for adverse event prediction [29]. The addition of textual information allowed our models to consistently outperform existing SOTA methods [29]. However, the performance difference between TxGemma-27B-Predict and Gemma-2-27B decreased in this scenario because the additional textual information diluted the relative importance of the drug SMILES strings.", + "bbox": [ + 107, + 444, + 888, + 628 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "TxGemma inference time is suitable for virtual screening In Figure S.11, we plot the inference speed of TxGemma models of all sizes normalized by the number of TPUv5e chips used for serving. All model sizes are suitably fast for virtual screening, as even the largest 27B model is able to inference around 9,000 samples per day per TPU chip. Using 64 chips for serving, this would yield around 600,000 samples per day for the 27B model, and the smallest 2B model would reach 3,000,000 samples per day.", + "bbox": [ + 107, + 632, + 887, + 709 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "Correlation between clinical trial approval and toxicity predictions We investigated the correlation between TxGemma's clinical trial approval predictions (based on SMILES and target disease) and its toxicity predictions (using TDC's AMES, DILI, and hERG tasks). Figure S.18 shows a consistent, but weak (0.15-0.35), positive Spearman correlation across all phases. 
This suggests TxGemma associates lower predicted toxicity with approval, but may also consider other factors such as efficacy or drug-likeness.", + "bbox": [ + 107, + 714, + 887, + 791 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "Impact of feature types Figure S.16 presents a performance breakdown of TxGemma-27B-Predict by feature type, compared to Tx-LLM M. In both models, tasks incorporating both SMILES strings and textual features (e.g., disease names, cell line names/description) show the most significant improvement over SOTA. This suggests that the contextual knowledge acquired during LLM pretraining could aid in synthesizing textual information with molecular representations.", + "bbox": [ + 107, + 795, + 887, + 873 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "Model size and domain fine-tuning ablations Figure S.17 compares the performance of TxGemma-Predict models across different sizes (2B, 9B, and 27B) on TDC tasks. Pairwise comparisons using the Wilcoxon", + "bbox": [ + 109, + 877, + 887, + 909 + ], + "page_idx": 13 + }, + { + "type": "page_number", + "text": "14", + "bbox": [ + 862, + 936, + 885, + 949 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "signed-rank test indicate that model size is a significant factor: TxGemma-27B-Predict outperforms TxGemma-9B-Predict $(p = 0.013)$ and TxGemma-2B-Predict $(p = 6.2 \\times 10^{-6})$ , and TxGemma-9B-Predict outperforms TxGemma-2B-Predict $(p = 0.048)$ . Furthermore, comparing TxGemma models to their corresponding base Gemma-2 models reveals the significant impact of domain fine-tuning. 
All TxGemma models significantly outperform their Gemma-2 counterparts $(p < 10^{-10}$ , Wilcoxon signed-rank test), underscoring the importance of specialized training for therapeutic tasks.", + "bbox": [ + 109, + 89, + 887, + 183 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "4 Related work", + "text_level": 1, + "bbox": [ + 109, + 199, + 267, + 215 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "Task-specific models for chemistry and therapeutics. In recent years, there has been a surge in the development of deep learning models designed for various chemistry applications. Amongst those, graph neural networks (GNNs) have been applied for a wide variety of molecular prediction or generation tasks because small molecules are naturally represented as graphs [30, 31, 32, 33, 34, 35, 36, 37, 24]. Another common representation for small molecules is molecular fingerprints [38], which are binary vectors that capture the local environment of each atom [30, 39, 40].", + "bbox": [ + 109, + 228, + 887, + 321 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "TxGNN trained a GNN on medical knowledge graphs in order to perform zero-shot drug repurposing for diseases with limited treatment options [41]. AlphaFold and its successors have also significantly advanced the field of protein structure prediction and protein design [42, 43, 44, 45, 46]. These models have been influential for both mechanistic research and the development of structure-based drugs [47].", + "bbox": [ + 109, + 325, + 885, + 388 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "Large language models for biology and chemistry. Transformer-based models [48] have fueled the development of LLMs, which are trained on massive textual datasets with subsequent instruction-tuning [49] or alignment [50]. LLMs have demonstrated exceptional proficiency in various tasks, including text summarization, translation, and question answering [16, 51, 52]. 
Their ability to encode vast amounts of information and generalize to new tasks has sparked considerable interest in their potential applications across diverse domains.", + "bbox": [ + 109, + 392, + 888, + 470 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "There has been increasing interest in applying the development for LLMs to scientific research. BrainGPT fine-tuned LLMs on neuroscience literature and found greater performance than domain experts [53]. LlaSMol fine-tuned LLMs on small molecule datasets and achieved near-SOTA performance on multiple tasks [23]. CLAMP used separate modules for natural language and molecular inputs, combining them together in a contrastive pre-training objective [54]. Protein language models [55, 56, 57, 58] and genomic language models [59, 60, 61] have used self-supervised pretraining to generate embeddings useful for downstream tasks. ProtLLM [62], BioT5 [63], and GraphToken [64] combine molecule or proteins with LLMs using textual or multi-modal strategies. Cellular foundation models such as scGPT [65], GenePT [66], Geneformer [67], Nicheformer [68], and Cell2Sentence [69] represent cells based on their gene expression to differentiate cell types and understand gene perturbations. NatureLM [70] trained a foundation model that represents small molecules, proteins, RNA, and materials as sequences over a wide variety of scientific tasks.", + "bbox": [ + 109, + 473, + 887, + 642 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "Agentic Systems. Unlike traditional passive models, agentic systems proactively choose actions to achieve goals [71, 72, 73, 74, 75], involving planning [76, 77, 78, 79, 80] and interaction with external tools [81, 82, 83, 84]. LLMs have enabled such systems by processing complex information and generating action-driving responses. 
The ReAct framework [22] combines reasoning, action, and observation, with variations incorporating self-reflection [85] or model architectures for internal tool usage [82]. Agentic frameworks enable automating tasks like software development [73, 86, 87, 88] and scientific research [89, 90, 91] including biomedical applications such as nanobody design [90], drug discovery [92], or reaction optimization [93]. ChemCrow [92] is an agent designed to perform chemistry experiments in drug discovery and materials design. The coscientist by Boiko et al. [93] designs and performs chemical experiments by integrating web knowledge, code execution, and experiment automation, demonstrating successful reaction optimization of palladium-catalysed cross-couplings. The multi-agent system AI co-scientist [88] is designed for hypothesis generation over a variety of scientific fields. TxAgent was developed as an agentic framework that provides multi-step reasoning and tool use aimed towards therapeutic applications, processing clinical information to support tasks like treatment recommendation [94]. In contrast to recommending existing therapeutics, Agentic-Tx generally focuses on developing new therapeutics.", + "bbox": [ + 109, + 646, + 887, + 876 + ], + "page_idx": 14 + }, + { + "type": "page_number", + "text": "15", + "bbox": [ + 862, + 936, + 883, + 949 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "5 Discussion", + "text_level": 1, + "bbox": [ + 112, + 89, + 238, + 106 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "TxGemma's performance suggests a paradigm shift in therapeutic AI development, demonstrating the viability of generalist LLMs. Despite the established dominance of specialist models in niche areas, TxGemma, a relatively lightweight and efficient generalist, achieves competitive results across a wide array of therapeutic tasks. 
This highlights the potential for broadly trained LLMs, such as those leveraging the comprehensive dataset Therapeutics Data Commons (TDC), to serve as powerful preliminary tools for hypothesis generation, information synthesis, and candidate prioritization. While specialist models would likely retain their value for complex, domain-specific challenges, future research should explore synergistic approaches that combine the strengths of both generalist and specialist therapeutic AI.", + "bbox": [ + 112, + 119, + 883, + 239 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "A significant advancement with TxGemma-Chat is its ability to provide reasoning for its predictions, a first in therapeutic AI and a feature lost in TxGemma-Predict, likely due to \"catastrophic forgetting\" [95]. While explainability may introduce a small trade-off in raw predictive power, it provides a crucial window into the model's decision-making, a factor of paramount importance in therapeutic development. For instance, explaining blood-brain barrier permeability based on molecular structure provides valuable insights for medicinal chemists. Beyond its research applications, TxGemma-Chat holds a significant educational potential, enabling students and researchers to explore complex therapeutic concepts. At the same time, it is important to acknowledge that provided explanations are correlations, not necessarily causal, and must be interpreted with caution. The model's occasional inability to explain certain predictions reveals its knowledge boundaries. Future research should prioritize improving reliability and comprehensive explanations. Even with current limitations, TxGemma-Chat represents an important improvement over the \"black-box\" paradigm.", + "bbox": [ + 112, + 246, + 883, + 412 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "Expanding beyond single-step predictions, Agentic-Tx demonstrates the potential for LLMs to orchestrate complex workflows. 
By integrating TxGemma with a suite of external tools (PubMed, Wikipedia, chemical databases, etc), Agentic-Tx can tackle multi-step reasoning tasks that would be difficult for a standalone LLM. Its strong performance on benchmarks like ChemBench Chemical Preference and Humanity's Last Exam (HLE) highlights the synergistic value of integrating domain-specific knowledge from TxGemma with general reasoning and information retrieval. This modular, tool-based design further ensures flexibility and extensibility, allowing for future integration of new tools and data. Importantly, it solves the issue of knowledge cut-off in LLMs by providing access to up-to-date information. Agentic-Tx with its autonomous and collaborative operation is a powerful asset for augmenting researchers and advancing therapeutic development.", + "bbox": [ + 112, + 419, + 883, + 556 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "The data efficiency of TxGemma is clearly demonstrated in fine-tuning experiments on TrialBench. It achieves robust performance on novel tasks with substantially less training data compared to baseline models, showcasing the benefits of pre-training on a broad and diverse dataset like TDC. This efficiency is particularly critical in therapeutic domains, where data is often proprietary and limited. Moreover, our finding that adding textual context, while improving overall results, can dilute the influence of molecular representations emphasizes the importance of balancing the benefits of additional information with strategic feature selection.", + "bbox": [ + 112, + 561, + 883, + 652 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "Although our in-silico results across a diverse range of therapeutic tasks are highly encouraging, we acknowledge that TxGemma's performance has not yet been validated in real-world, wet-lab experiments. Prospective validation in these settings represents a crucial next step. 
However, a cornerstone of this work is our commitment to open model release. By making TxGemma readily accessible to the research community, we aim to facilitate its rigorous validation and adaptation. Researchers can tailor TxGemma to their specific datasets, encompassing tasks and distribution shifts beyond the scope of TDC. Given the predominantly proprietary nature of therapeutic data, we believe this collaborative, community-driven approach is essential for translating TxGemma into tangible therapeutic applications", + "bbox": [ + 112, + 657, + 883, + 780 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "6 Conclusion", + "text_level": 1, + "bbox": [ + 112, + 797, + 241, + 814 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "In conclusion, this work introduced TxGemma, a suite of efficient, generalist LLMs designed to improve therapeutic development. By leveraging extensive therapeutic instruction-tuning datasets and building upon the foundation of Gemma-2, TxGemma achieves exceptional performance across a wide range of predictive and generative therapeutic tasks, surpassing or matching both generalist and specialist state-of-the-art models. Notably, TxGemma's conversational counterparts, a first in therapeutic AI, provide reasoning and explanations,", + "bbox": [ + 112, + 828, + 883, + 904 + ], + "page_idx": 15 + }, + { + "type": "page_number", + "text": "16", + "bbox": [ + 862, + 936, + 883, + 948 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "moving beyond traditional black-box predictions to facilitate mechanistic understanding and scientific discourse. Furthermore, the integration of TxGemma into an agentic system, Agentic-Tx, demonstrates its capacity to solve complex, multi-step problems, achieving state-of-the-art results on challenging reasoning-intensive tasks. 
Finally, and critically, the open release of TxGemma empowers the research community and scientist to adapt and refine the models on their own private data, potentially leading to significant advancements in drug discovery and development. Through these contributions, TxGemma represents a meaningful step towards more efficient, transparent, and collaborative AI-driven therapeutic research.", + "bbox": [ + 109, + 90, + 885, + 196 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "Acknowledgments", + "text_level": 1, + "bbox": [ + 109, + 227, + 264, + 242 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "This project was a collaboration between teams at Google DeepMind and Google Research. We thank Marcus Brubaker, David Belanger, Justin Chen, and David Steiner for the feedback and insight which significantly contributed to the enhancement of this report. We thank Tris Warkentin, Glenn Cameron, Victor Cotruta, Fereshteh Mahvar, Tiffany Chen, Omar Sansevier, Kathleen Kenealy, Joe Fernandez, Gus Martins, Nabila Babar, Sara Smoot, Antonia Paterson, Pankil Botadra, Metin Toksoz-Exley, Tim Thelin, Can \"John\" Kirmizi, and Fayaz Jamil for their collaborative efforts in enabling the open model launch of TxGemma. We also thank Phoebe Kirk, Rachelle Sico, Yun Liu, Anand Rao, Jon Small, Juanita Bawagan, Jane Park, Jenn Sturgeon, Fred Alcober, Samantha Heyman, Abhinav Das for their valuable insights and technical support. 
We are also grateful to Zoubin Ghahramani, Raia Hadsell, Avinatan Hassidim, Katherine Chou, Dale Webster, Jon Shlens, and Pushmeet Kohli for their support during the course of this project.", + "bbox": [ + 109, + 252, + 887, + 405 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "Inclusion and ethics", + "text_level": 1, + "bbox": [ + 109, + 416, + 279, + 430 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "While AI offers transformative potential in drug discovery, ethical considerations and transparency remain crucial. Biases in training data can lead to inequities, highlighting the need for diverse datasets and explainable AI systems. Our model, while still in the research stage, highlights the continuous need for development and refinement in this field. We acknowledge the difficulty in explaining the inner workings of complex models, but remain dedicated to advancing research in this area.", + "bbox": [ + 109, + 441, + 885, + 518 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "Data availability", + "text_level": 1, + "bbox": [ + 109, + 530, + 251, + 545 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "The Therapeutics Data Commons (TDC) datasets used for developing, benchmarking, and evaluating TxGemma are publicly available on their website. The benchmarking datasets used in this study—Humanity's Last Exam (HLE), GPQA (Diamond), ChemBench, and TrialBench (Serious Adverse Event Prediction)—are all publicly available via their respective websites.", + "bbox": [ + 109, + 555, + 885, + 617 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "Code availability", + "text_level": 1, + "bbox": [ + 109, + 628, + 254, + 643 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "All of the components used in this work are available publicly. For reproducibility, we have documented technical methods and data curation detail in depth, while keeping the paper accessible to clinical and general scientific audiences. 
Specifically, all the data needs to reproduce this work is publicly accessible to the community. TxGemma, a collection of lightweight state-of-the-art, open language models, are provided for researchers in three model size of 2B, 9B, and 27B and is accessible through Vertex AI Model Garden and Hugging Face. TxGemma's Github repository including supporting code and colab notebooks for quick start are also available at: https://github.com/google-gemini/gemma-cookbook/tree/main/TxGemma. We have specifically provided starter colabs for inference, fine-tuning, and exploring agentic capabilities. TxGemma remains a research model and requires refinement. We look forward to working with research partners, regulators, and providers to validate and explore safe onward uses of TxGemma.", + "bbox": [ + 109, + 654, + 887, + 806 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "Author Contributions", + "text_level": 1, + "bbox": [ + 109, + 816, + 294, + 832 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "E.W., S.S., and S.A. made substantial contributions to the conception, design, and evaluation of this work. They played a key role in data analysis, interpretation of results, and the drafting and revision of the manuscript. P.F.J. contributed to drafting and revision of the manuscript. F.Z. contributed to the data processing and model training in the manuscript. R.P. contributed to obtaining necessary legal approvals,", + "bbox": [ + 109, + 843, + 885, + 905 + ], + "page_idx": 16 + }, + { + "type": "page_number", + "text": "17", + "bbox": [ + 862, + 936, + 885, + 949 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "and organizational support. 
All authors participated in critically reviewing and revising the manuscript and interpreting the data and findings.", + "bbox": [ + 109, + 90, + 885, + 122 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "Competing interests", + "text_level": 1, + "bbox": [ + 109, + 132, + 284, + 148 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "This study was funded by Alphabet Inc and/or a subsidiary thereof ('Alphabet'). E.W., S.S., P.F.J., F.Z., R.P., Y.M., J.B., D.F., and S.A. are employees of Alphabet and may own stock as part of the standard compensation package.", + "bbox": [ + 109, + 157, + 887, + 204 + ], + "page_idx": 17 + }, + { + "type": "page_number", + "text": "18", + "bbox": [ + 862, + 936, + 883, + 949 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "References", + "text_level": 1, + "bbox": [ + 120, + 90, + 228, + 104 + ], + "page_idx": 18 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "1. Mirza, A., Alampara, N., Kunchapu, S., Rios-Garcia, M., Emoekabu, B., Krishnan, A., Gupta, T., Schilling-Wilhelmi, M., Okereke, M., Aneesh, A., et al. Are large language models superhuman chemists? arXiv preprint arXiv:2404.01475 (2024).", + "2. OpenAI. Learning to Reason with LLMs https://openai.com/index/learning-to-reason-with-llms/. Accessed: Wednesday 9th April, 2025. 2024.", + "3. Sun, D., Gao, W., Hu, H. & Zhou, S. Why $90\\%$ of clinical drug development fails and how to improve it? Acta Pharmaceutica Sinica B 12, 3049-3062 (2022).", + "4. Hinkson, I. V., Madej, B. & Stahlberg, E. A. Accelerating therapeutics for opportunities in medicine: a paradigm shift in drug discovery. Frontiers in pharmacology 11, 770 (2020).", + "5. Kumar, A., Voet, A. & Zhang, K. Y. Fragment based drug design: from experimental to computational approaches. *Current medicinal chemistry* 19, 5128-5147 (2012).", + "6. Velez-Arce, A., Huang, K., Li, M. M., Lin, X., Gao, W., Fu, T., Kellis, M., Pentelute, B. L. & Zitnik, M. 
TDC-2: Multimodal foundation for therapeutic science. bioRxiv, 2024-06 (2024).", + "7. Huang, K., Fu, T., Gao, W., Zhao, Y., Roohani, Y., Leskovec, J., Coley, C. W., Xiao, C., Sun, J. & Zitnik, M. Therapeutics data commons: Machine learning datasets and tasks for drug discovery and development. arXiv preprint arXiv:2102.09548 (2021).", + "8. Huang, K., Fu, T., Gao, W., Zhao, Y., Roohani, Y., Leskovec, J., Coley, C. W., Xiao, C., Sun, J. & Zitnik, M. Artificial intelligence foundation for therapeutic science. Nature chemical biology 18, 1033-1036 (2022).", + "9. Bubeck, S., Chandrasekaran, V., Eldan, R., Gehrke, J., Horvitz, E., Kamar, E., Lee, P., Lee, Y. T., Li, Y., Lundberg, S., et al. Sparks of artificial general intelligence: Early experiments with GPT-4. arXiv preprint arXiv:2303.12712 (2023).", + "10. Taylor, R., Kardas, M., Cucurull, G., Scialom, T., Hartshorn, A., Saravia, E., Poulton, A., Kerkez, V. & Stojnic, R. Galactica: A large language model for science. arXiv preprint arXiv:2211.09085 (2022).", + "11. Telenti, A., Auli, M., Hie, B. L., Maher, C., Saria, S. & Ioannidis, J. P. Large language models for science and medicine. European journal of clinical investigation 54, e14183 (2024).", + "12. Chaves, J. M. Z., Wang, E., Tu, T., Vaishnav, E. D., Lee, B., Mahdavi, S. S., Semturs, C., Fleet, D., Natarajan, V. & Azizi, S. Tx-LLM: A Large Language Model for Therapeutics. arXiv preprint arXiv:2406.06316 (2024).", + "13. Team, G., Mesnard, T., Hardin, C., Dadashi, R., Bhupatiraju, S., Pathak, S., Sifre, L., Riviere, M., Kale, M. S., Love, J., et al. Gemma: Open models based on gemini research and technology. arXiv preprint arXiv:2403.08295 (2024).", + "14. Team, G., Riviere, M., Pathak, S., Sessa, P. G., Hardin, C., Bhupatiraju, S., Hussenot, L., Mesnard, T., Shahriari, B., Ramé, A., et al. Gemma 2: Improving open language models at a practical size. arXiv preprint arXiv:2408.00118 (2024).", + "15. 
Phan, L., Gatti, A., Han, Z., Li, N., Hu, J., Zhang, H., Shi, S., Choi, M., Chopra, A., et al. Humanity's Last Exam. arXiv preprint arXiv:2501.14249 (2025).", + "16. Brown, T., Mann, B., Ryder, N., Subbiah, M., Kaplan, J. D., Dhariwal, P., Neelakantan, A., Shyam, P., Sastry, G., Askell, A., et al. Language models are few-shot learners. Advances in neural information processing systems 33, 1877-1901 (2020).", + "17. Longpre, S., Hou, L., Vu, T., Webson, A., Chung, H. W., Tay, Y., Zhou, D., Le, Q. V., Zoph, B., Wei, J., et al. The FLAN collection: Designing data and methods for effective instruction tuning in International Conference on Machine Learning (2023), 22631-22648.", + "18. Team, G., Anil, R., Borgeaud, S., Alayrac, J.-B., Yu, J., Soricut, R., Schalkwyk, J., Dai, A. M., Hauth, A., Millican, K., et al. Gemini: a family of highly capable multimodal models. arXiv preprint arXiv:2312.11805 (2023).", + "19. Landrum, G. RDKit: Open-Source Cheminformatics Software. https://github.com/rdkit/rdkit/releases/tag/Release_2016_09_4 (2016).", + "20. Dalke, A. The chemfp project. Journal of cheminformatics 11, 1-21 (2019).", + "21. Sievers, F., Wilm, A., Dineen, D., Gibson, T. J., Karplus, K., Li, W., Lopez, R., McWilliam, H., Remmert, M., Söding, J., et al. Fast, scalable generation of high-quality protein multiple sequence alignments using Clustal Omega. Molecular systems biology 7, 539 (2011).", + "22. Yao, S., Zhao, J., Yu, D., Du, N., Shafran, I., Narasimhan, K. & Cao, Y. React: Synergizing reasoning and acting in language models. arXiv preprint arXiv:2210.03629 (2022).", + "23. Yu, B., Baker, F. N., Chen, Z., Ning, X. & Sun, H. Llasmol: Advancing large language models for chemistry with a large-scale, comprehensive, high-quality instruction tuning dataset. arXiv preprint arXiv:2402.09391 (2024).", + "24. Mendez-Lucio, O., Nicolaou, C. A. & Earnshaw, B. MolE: a foundation model for molecular graphs using disentangled attention. 
Nature Communications 15, 9431 (2024).", + "25. Team, G. Gemma 3 technical report. Google DeepMind (2025).", + "26. Hendrycks, D., Burns, C., Basart, S., Zou, A., Mazeika, M., Song, D. & Steinhardt, J. Measuring massive multitask language understanding. arXiv preprint arXiv:2009.03300 (2020).", + "27. Rein, D., Hou, B. L., Stickland, A. C., Petty, J., Pang, R. Y., Dirani, J., Michael, J. & Bowman, S. R. Gpqa: A graduate-level google-proof q@a benchmark in First Conference on Language Modeling (2024).", + "28. Skarlinski, M. D., Cox, S., Laurent, J. M., Braza, J. D., Hinks, M., Hammerling, M. J., Ponnapati, M., Rodriques, S. G. & White, A. D. Language agents achieve superhuman synthesis of scientific knowledge. arXiv preprint arXiv:2409.13740 (2024).", + "29. Chen, J., Hu, Y., Wang, Y., Lu, Y., Cao, X., Lin, M., Xu, H., Wu, J., Xiao, C., Sun, J., et al. TrialBench: Multi-modal artificial intelligence-ready clinical trial datasets. arXiv preprint arXiv:2407.00631 (2024).", + "30. Torng, W. & Altman, R. B. Graph convolutional neural networks for predicting drug-target interactions. Journal of chemical information and modeling 59, 4131-4149 (2019)." + ], + "bbox": [ + 112, + 119, + 885, + 901 + ], + "page_idx": 18 + }, + { + "type": "page_number", + "text": "19", + "bbox": [ + 862, + 936, + 883, + 948 + ], + "page_idx": 18 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "31. Stärk, H., Ganea, O., Pattanaik, L., Barzilay, R. & Jaakkola, T. Equibind: Geometric deep learning for drug binding structure prediction in International conference on machine learning (2022), 20503-20521.", + "32. Xiong, Z., Wang, D., Liu, X., Zhong, F., Wan, X., Li, X., Li, Z., Luo, X., Chen, K., Jiang, H., et al. Pushing the boundaries of molecular representation for drug discovery with the graph attention mechanism. Journal of medicinal chemistry 63, 8749-8760 (2019).", + "33. Heid, E. & Green, W. H. 
Machine learning of reaction properties via learned representations of the condensed graph of reaction. Journal of Chemical Information and Modeling 62, 2101-2110 (2021).", + "34. Yang, K., Swanson, K., Jin, W., Coley, C., Eiden, P., Gao, H., Guzman-Perez, A., Hopper, T., Kelley, B., Mathea, M., et al. Analyzing learned molecular representations for property prediction. Journal of chemical information and modeling 59, 3370-3388 (2019).", + "35. Morrone, J. A., Weber, J. K., Huynh, T., Luo, H. & Cornell, W. D. Combining docking pose rank and structure with deep learning improves protein-ligand binding mode prediction over a baseline docking approach. Journal of chemical information and modeling 60, 4170-4179 (2020).", + "36. Mohr, B., Shmilovich, K., Kleinwächter, I. S., Schneider, D., Ferguson, A. L. & Bereau, T. Data-driven discovery of cardiolipin-selective small molecules by computational active learning. Chemical Science 13, 4498-4511 (2022).", + "37. Stokes, J. M., Yang, K., Swanson, K., Jin, W., Cubillos-Ruiz, A., Donghia, N. M., MacNair, C. R., French, S., Carfrae, L. A., Bloom-Ackermann, Z., et al. A deep learning approach to antibiotic discovery. Cell 180, 688-702 (2020).", + "38. Rogers, D. & Hahn, M. Extended-connectivity fingerprints. Journal of chemical information and modeling 50, 742-754 (2010).", + "39. Tayyebi, A., Alshami, A. S., Rabiei, Z., Yu, X., Ismail, N., Talukder, M. J. & Power, J. Prediction of organic compound aqueous solubility using machine learning: a comparison study of descriptor-based and fingerprints-based models. Journal of Cheminformatics 15, 99 (2023).", + "40. Belenahalli Shekarappa, S., Kandagalla, S. & Lee, J. Development of machine learning models based on molecular fingerprints for selection of small molecule inhibitors against JAK2 protein. Journal of Computational Chemistry 44, 1493-1504 (2023).", + "41. Huang, K., Chandak, P., Wang, Q., Havaldar, S., Vaid, A., Leskovec, J., Nadkarni, G. N., Glicksberg, B. 
S., Gehlenborg, N. & Zitnik, M. A foundation model for clinician-centered drug repurposing. Nature Medicine, 1-13 (2024).", + "42. Jumper, J., Evans, R., Pritzel, A., Green, T., Figurnov, M., Ronneberger, O., Tunyasuvunakool, K., Bates, R., Zidek, A., Potapenko, A., et al. Highly accurate protein structure prediction with AlphaFold. nature 596, 583-589 (2021).", + "43. Tunyasuvunakool, K., Adler, J., Wu, Z., Green, T., Zielinski, M., Žídek, A., Bridgland, A., Cowie, A., Meyer, C., Laydon, A., et al. Highly accurate protein structure prediction for the human proteome. Nature 596, 590-596 (2021).", + "44. Senior, A. W., Evans, R., Jumper, J., Kirkpatrick, J., Sifre, L., Green, T., Qin, C., Zidek, A., Nelson, A. W., Bridgland, A., et al. Improved protein structure prediction using potentials from deep learning. Nature 577, 706-710 (2020).", + "45. Abramson, J., Adler, J., Dunger, J., Evans, R., Green, T., Pritzel, A., Ronneberger, O., Willmore, L., Ballard, A. J., Bambrick, J., et al. Accurate structure prediction of biomolecular interactions with AlphaFold 3. Nature, 1-3 (2024).", + "46. Zambaldi, V., La, D., Chu, A. E., Patani, H., Danson, A. E., Kwan, T. O., Frerix, T., Schneider, R. G., Saxton, D., Thillaisundaram, A., et al. De novo design of high-affinity protein binders with AlphaProteo. arXiv preprint arXiv:2409.08022 (2024).", + "47. Ren, F., Ding, X., Zheng, M., Korzinkin, M., Cai, X., Zhu, W., Mantsyzov, A., Aliper, A., Aladinskiy, V., Cao, Z., et al. AlphaFold accelerates artificial intelligence powered drug discovery: efficient discovery of a novel CDK20 small molecule inhibitor. Chemical science 14, 1443-1452 (2023).", + "48. Vaswani, A. Attention is all you need. Advances in Neural Information Processing Systems (2017).", + "49. Zhang, S., Dong, L., Li, X., Zhang, S., Sun, X., Wang, S., Li, J., Hu, R., Zhang, T., Wu, F., et al. Instruction tuning for large language models: A survey. arXiv preprint arXiv:2308.10792 (2023).", + "50. 
Kaufmann, T., Weng, P., Bengs, V. & Hüllermeier, E. A survey of reinforcement learning from human feedback. arXiv preprint arXiv:2312.14925 (2023).", + "51. Liu, Y. & Lapata, M. Text summarization with pretrained encoders. arXiv preprint arXiv:1908.08345 (2019).", + "52. Kenton, J. D. M.-W. C. & Toutanova, L. K. BERT: Pre-training of deep bidirectional transformers for language understanding in Proceedings of naacL-HLT 1 (2019).", + "53. Luo, X., Rechardt, A., Sun, G., Nejad, K. K., Yáñez, F., Yilmaz, B., Lee, K., Cohen, A. O., Borghesani, V., Pashkov, A., et al. Large language models surpass human experts in predicting neuroscience results. Nature human behaviour, 1-11 (2024).", + "54. Seidl, P., Vall, A., Hochreiter, S. & Klambauer, G. Enhancing activity prediction models in drug discovery with the ability to understand human language in International Conference on Machine Learning (2023), 30458-30490.", + "55. Rives, A., Meier, J., Sercu, T., Goyal, S., Lin, Z., Liu, J., Guo, D., Ott, M., Zitnick, C. L., Ma, J., et al. Biological structure and function emerge from scaling unsupervised learning to 250 million protein sequences. Proceedings of the National Academy of Sciences 118, e2016239118 (2021).", + "56. Lin, Z., Akin, H., Rao, R., Hie, B., Zhu, Z., Lu, W., Smetanin, N., Verkuil, R., Kabeli, O., Shmueli, Y., et al. Evolutionary-scale prediction of atomic-level protein structure with a language model. Science 379, 1123-1130 (2023).", + "57. Alley, E. C., Khimulya, G., Biswas, S., AlQuraishi, M. & Church, G. M. Unified rational protein engineering with sequence-based deep representation learning. Nature methods 16, 1315-1322 (2019).", + "58. Ferruz, N., Schmidt, S. & Höcker, B. ProtGPT2 is a deep unsupervised language model for protein design. Nature communications 13, 4348 (2022).", + "59. Nguyen, E., Poli, M., Durrant, M. G., Kang, B., Katrekar, D., Li, D. B., Bartie, L. J., Thomas, A. W., King, S. H., Brixi, G., et al. 
Sequence modeling and design from molecular to genome scale with Evo. Science 386, eado9336 (2024)." + ], + "bbox": [ + 109, + 90, + 887, + 893 + ], + "page_idx": 19 + }, + { + "type": "page_number", + "text": "20", + "bbox": [ + 862, + 936, + 883, + 948 + ], + "page_idx": 19 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "60. Dalla-Torre, H., Gonzalez, L., Mendoza-Revilla, J., Lopez Carranza, N., Grzywaczewski, A. H., Oteri, F., Dallago, C., Trop, E., de Almeida, B. P., Sirelkhatim, H., et al. Nucleotide Transformer: building and evaluating robust foundation models for human genomics. Nature Methods, 1-11 (2024).", + "61. Cornman, A., West-Roberts, J., Camargo, A. P., Roux, S., Beracochea, M., Mirdita, M., Ovchinnikov, S. & Hwang, Y. The OMG dataset: An Open MetaGenomic corpus for mixed-modality genomic language modeling. bioRxiv, 2024-08 (2024).", + "62. Zhuo, L., Chi, Z., Xu, M., Huang, H., Zheng, H., He, C., Mao, X.-L. & Zhang, W. Protllm: An interleaved protein-language llm with protein-as-word pre-training. arXiv preprint arXiv:2403.07920 (2024).", + "63. Pei, Q., Zhang, W., Zhu, J., Wu, K., Gao, K., Wu, L., Xia, Y. & Yan, R. Biot5: Enriching cross-modal integration in biology with chemical knowledge and natural language associations. arXiv preprint arXiv:2310.07276 (2023).", + "64. Anonymous. Parameter Efficient Graph Encoding for Large Language Models 2025. https://openreview.net/forum?id=RbcXV63ZJk.", + "65. Cui, H., Wang, C., Maan, H., Pang, K., Luo, F., Duan, N. & Wang, B. scGPT: toward building a foundation model for single-cell multi-omics using generative AI. Nature Methods, 1-11 (2024).", + "66. Chen, Y. & Zou, J. GenePT: a simple but effective foundation model for genes and cells built from ChatGPT. bioRxiv, 2023-10 (2024).", + "67. Theodoris, C. V., Xiao, L., Chopra, A., Chaffin, M. D., Al Sayed, Z. R., Hill, M. C., Mantineo, H., Brydon, E. M., Zeng, Z., Liu, X. S., et al. 
Transfer learning enables predictions in network biology. Nature 618, 616-624 (2023).", + "68. Schaar, A. C., Tejada-Lapuerta, A., Palla, G., Gutgesell, R., Halle, L., Minaeva, M., Vornholz, L., Dony, L., Drummer, F., Bahrami, M., et al. Nicheformer: a foundation model for single-cell and spatial omics. bioRxiv, 2024-04 (2024).", + "69. Levine, D., Rizvi, S. A., Lévy, S., Pallikkavaliyaveetil, N., Zhang, D., Chen, X., Ghadermarzi, S., Wu, R., Zheng, Z., Vrkic, I., et al. Cell2Sentence: teaching large language models the language of biology. BioRxiv, 2023-09 (2023).", + "70. Xia, Y., Jin, P., Xie, S., He, L., Cao, C., Luo, R., Liu, G., Wang, Y., Liu, Z., Chen, Y.-J., et al. NatureLM: Deciphering the Language of Nature for Scientific Discovery. arXiv preprint arXiv:2502.07527 (2025).", + "71. Wang, L., Ma, C., Feng, X., Zhang, Z., Yang, H., Zhang, J., Chen, Z., Tang, J., Chen, X., Lin, Y., et al. A survey on large language model based autonomous agents. Frontiers of Computer Science 18, 186345 (2024).", + "72. Shanahan, M., McDonell, K. & Reynolds, L. Role play with large language models. Nature 623, 493-498 (2023).", + "73. Qian, C., Cong, X., Yang, C., Chen, W., Su, Y., Xu, J., Liu, Z. & Sun, M. Communicative agents for software development. arXiv preprint arXiv:2307.07924 6 (2023).", + "74. Hong, S., Zheng, X., Chen, J., Cheng, Y., Wang, J., Zhang, C., Wang, Z., Yau, S. K. S., Lin, Z., Zhou, L., et al. Metagpt: Meta programming for multi-agent collaborative framework. arXiv preprint arXiv:2308.00352 (2023).", + "75. Talebirad, Y. & Nadiri, A. Multi-agent collaboration: Harnessing the power of intelligent llm agents. arXiv preprint arXiv:2306.03314 (2023).", + "76. Hao, S., Gu, Y., Ma, H., Hong, J. J., Wang, Z., Wang, D. Z. & Hu, Z. Reasoning with language model is planning with world model. arXiv preprint arXiv:2305.14992 (2023).", + "77. Huang, W., Abbeel, P., Pathak, D. & Mordatch, I. 
Language models as zero-shot planners: Extracting actionable knowledge for embodied agents in International conference on machine learning (2022), 9118-9147.", + "78. Song, C. H., Wu, J., Washington, C., Sadler, B. M., Chao, W.-L. & Su, Y. Lm-planner: Few-shot grounded planning for embodied agents with large language models in Proceedings of the IEEE/CVF International Conference on Computer Vision (2023), 2998-3009.", + "79. Wang, Z., Cai, S., Chen, G., Liu, A., Ma, X. & Liang, Y. Describe, explain, plan and select: Interactive planning with large language models enables open-world multi-task agents. arXiv preprint arXiv:2302.01560 (2023).", + "80. Yao, S., Yu, D., Zhao, J., Shafran, I., Griffiths, T., Cao, Y. & Narasimhan, K. Tree of thoughts: Deliberate problem solving with large language models. Advances in Neural Information Processing Systems 36 (2024).", + "81. Parisi, A., Zhao, Y. & Fiedel, N. Talm: Tool augmented language models. arXiv preprint arXiv:2205.12255 (2022).", + "82. Schick, T., Dwivedi-Yu, J., Dessi', R., Raileanu, R., Lomeli, M., Hambro, E., Zettlemoyer, L., Cancedda, N. & Scialom, T. Toolformer: Language models can teach themselves to use tools. Advances in Neural Information Processing Systems 36, 68539-68551 (2023).", + "83. Qin, Y., Hu, S., Lin, Y., Chen, W., Ding, N., Cui, G., Zeng, Z., Zhou, X., Huang, Y., Xiao, C., et al. Tool learning with foundation models. ACM Computing Surveys 57, 1-40 (2024).", + "84. Cai, T., Wang, X., Ma, T., Chen, X. & Zhou, D. Large language models as tool makers. arXiv preprint arXiv:2305.17126 (2023).", + "85. Shinn, N., Cassano, F., Gopinath, A., Narasimhan, K. & Yao, S. Reflexion: Language agents with verbal reinforcement learning. Advances in Neural Information Processing Systems 36 (2024).", + "86. Yang, J., Jimenez, C. E., Wettig, A., Lieret, K., Yao, S., Narasimhan, K. & Press, O. Swe-agent: Agent-computer interfaces enable automated software engineering. arXiv preprint arXiv:2405.15793 (2024).", + "87. 
Qian, C., Dang, Y., Li, J., Liu, W., Chen, W., Yang, C., Liu, Z. & Sun, M. Experiential co-learning of software-developing agents. arXiv preprint arXiv:2312.17025 (2023).", + "88. Gottweis, J., Weng, W.-H., Daryin, A., Tu, T., Palepu, A., Sirkovic, P., Myaskovsky, A., Weissenberger, F., Rong, K., Tanno, R., et al. Towards an AI co-scientist. arXiv preprint arXiv:2502.18864 (2025).", + "89. Schmidgall, S., Su, Y., Wang, Z., Sun, X., Wu, J., Yu, X., Liu, J., Liu, Z. & Barsoum, E. Agent Laboratory: Using LLM Agents as Research Assistants. arXiv preprint arXiv:2501.04227 (2025).", + "90. Swanson, K., Wu, W., Bulaong, N. L., Pak, J. E. & Zou, J. The virtual lab: Ai agents design new sars-cov-2 nanobodies with experimental validation. bioRxiv, 2024-11 (2024).", + "91. Lu, C., Lu, C., Lange, R. T., Foerster, J., Clune, J. & Ha, D. The ai scientist: Towards fully automated open-ended scientific discovery. arXiv preprint arXiv:2408.06292 (2024)." + ], + "bbox": [ + 111, + 90, + 885, + 910 + ], + "page_idx": 20 + }, + { + "type": "page_number", + "text": "21", + "bbox": [ + 862, + 936, + 882, + 948 + ], + "page_idx": 20 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "92. M. Bran, A., Cox, S., Schilter, O., Baldassari, C., White, A. D. & Schwaller, P. Augmenting large language models with chemistry tools. Nature Machine Intelligence, 1-11 (2024).", + "93. Boiko, D. A., MacKnight, R., Kline, B. & Gomes, G. Autonomous chemical research with large language models. Nature 624, 570-578 (2023).", + "94. Gao, S., Zhu, R., Kong, Z., Noori, A., Su, X., Ginder, C., Tsiligkaridis, T. & Zitnik, M. TxAgent: An AI Agent for Therapeutic Reasoning Across a Universe of Tools. arXiv preprint arXiv:2503.10970 (2025).", + "95. Aleixo, E. L., Colonna, J. G., Cristo, M. & Fernandes, E. Catastrophic forgetting in deep learning: A comprehensive taxonomy. arXiv preprint arXiv:2312.10549 (2023)." 
+ ], + "bbox": [ + 109, + 90, + 885, + 191 + ], + "page_idx": 21 + }, + { + "type": "page_number", + "text": "22", + "bbox": [ + 862, + 936, + 883, + 948 + ], + "page_idx": 21 + }, + { + "type": "text", + "text": "Supplementary Material", + "text_level": 1, + "bbox": [ + 109, + 85, + 439, + 109 + ], + "page_idx": 22 + }, + { + "type": "text", + "text": "Version control", + "text_level": 1, + "bbox": [ + 109, + 126, + 263, + 142 + ], + "page_idx": 22 + }, + { + "type": "text", + "text": "V0 (25 March 2025) $\\rightarrow$ V1", + "text_level": 1, + "bbox": [ + 109, + 157, + 333, + 174 + ], + "page_idx": 22 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- Upgraded the Agentic-Tx system's orchestrator from Gemini 2.0 to Gemini 2.5. This enhancement results in significant performance improvements in complex workflow orchestration, as detailed in Table 3.", + "- Added performance results of TxGemma-Predict and TxGemma-Chat (trained only on commercially licensed datasets) for binary classification (Table S.17), regression, and generation tasks (Table S.18)." 
+ ], + "bbox": [ + 133, + 186, + 883, + 248 + ], + "page_idx": 22 + }, + { + "type": "text", + "text": "A Summary", + "text_level": 1, + "bbox": [ + 109, + 265, + 235, + 282 + ], + "page_idx": 22 + }, + { + "type": "text", + "text": "Data details as listed in Section B:", + "bbox": [ + 135, + 295, + 403, + 308 + ], + "page_idx": 22 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- Table S.1: Excluded TDC tasks and reasons for exclusion.", + "- Table S.2: Number of samples in training, validation, and test sets for all binary classification tasks.", + "- Table S.3: Number of samples in training, validation, and test sets for all regression and generation tasks.", + "- Table S.4: Descriptions of the binary classification tasks.", + "- Table S.5: Descriptions of the regression and generation tasks.", + "- Table S.6 Types of features in the processed TDC data along with illustrative examples.", + "Figure S.1: Distribution of TDC task sizes, aggregated over train, validation, and test sets." + ], + "bbox": [ + 166, + 314, + 883, + 452 + ], + "page_idx": 22 + }, + { + "type": "text", + "text": "Method and modeling details as listed in Section C:", + "bbox": [ + 135, + 455, + 527, + 470 + ], + "page_idx": 22 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- Table S.7 Examples of prompts for binary classification tasks.", + "- Table S.8 Examples of prompts for regression and generation tasks.", + "- Table S.9 Example of a 10-shot prompt for a binary classification task.", + "- Table S.10 Example of prompts for predicting adverse events in clinical trials.", + "- Table S.11 Example of Agentic-Tx response to a chemical preference question.", + "- Table S.12 List of tools available to Agentic-Tx.", + "- Figure S.2 Distribution of Tanimoto similarities for 10 nearest neighbors by dataset splits in the AMES task.", + "- Section C.1 Details about Wilcoxon signed-rank test used to assess model performance." 
+ ], + "bbox": [ + 168, + 476, + 883, + 613 + ], + "page_idx": 22 + }, + { + "type": "text", + "text": "Additional results as listed in Section D:", + "bbox": [ + 135, + 618, + 444, + 632 + ], + "page_idx": 22 + }, + { + "type": "text", + "text": "- Additional prediction results for TxGemma (Section D.1)", + "bbox": [ + 168, + 637, + 599, + 652 + ], + "page_idx": 22 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "* Table S.13 Performance on binary classification tasks for specialist SOTA, base Gemma-2, and TxGemma-Predict models.", + "* Table S.14 Performance on regression and generation tasks for specialist SOTA, base Gemma-2, and TxGemma-Predict models.", + "* Table S.15 Performance on binary classification tasks for TxGemma-Predict, TxGemma-Chat, and Tx-LLM models.", + "* Table S.16 Performance on regression and generation tasks for TxGemma-Predict, TxGemma-Chat, and Tx-LLM models.", + "* Table S.17 Performance on binary classification tasks for TxGemma-Predict and TxGemma-Chat models trained only on datasets with commercial licenses.", + "* Table S.18 Performance on regression and generation tasks for TxGemma-Predict and TxGemma-Chat models trained only on datasets with commercial licenses.", + "* Figure S.4 Performance of TxGemma-27B-Predict compared to generalist and specialist SOTA models.", + "* Figure S.5 Comparison of TxGemma-27B-Predict with LlaSMol on select small molecule tasks.", + "* Figure S.6 Comparison of TxGemma-27B-Predict with MolE on select small molecule tasks.", + "* Figure S.11 Inference speed of TxGemma models at various sizes." 
+ ], + "bbox": [ + 199, + 654, + 883, + 912 + ], + "page_idx": 22 + }, + { + "type": "page_number", + "text": "23", + "bbox": [ + 862, + 936, + 883, + 949 + ], + "page_idx": 22 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "* Figure S.12 Percent contamination for datasets and cosine similarity analysis.", + "* Figure S.13 Performance on contaminated datasets before and after filtering out contaminated datapoints.", + "* Figure S.16 Performance by feature type of all TxGemma-Predict sizes.", + "* Figure S.17 Comparison of TxGemma-Predict performances over different sizes and with Gemma-2 models.", + "* Figure S.18 Correlations of TxGemma-27B-Predict predictions for toxicity and clinical trial approval tasks." + ], + "bbox": [ + 200, + 90, + 883, + 210 + ], + "page_idx": 23 + }, + { + "type": "text", + "text": "- Conversing with TxGemma-27B-Predict and TxGemma-27B-Chat (Section D.2)", + "bbox": [ + 169, + 214, + 763, + 228 + ], + "page_idx": 23 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "* Figure S.7 Comparison of TxGemma-27B-Predict, TxGemma-27B-Chat, and Gemma-2-27B on MMLU.", + "* Figure S.8 Example of a dialogue with TxGemma-27B-Predict about general topics.", + "* Figure S.9 Example of a multi-turn dialogue with TxGemma-27B-Predict about its predictions.", + "* Figure S.10 Example of a prompt format the enables TxGemma-Chat to provide reasoning for challenging tasks." + ], + "bbox": [ + 200, + 231, + 883, + 321 + ], + "page_idx": 23 + }, + { + "type": "text", + "text": "- Additional Agentic-Tx Results (Section D.3)", + "bbox": [ + 169, + 325, + 506, + 340 + ], + "page_idx": 23 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "* Figure S.14 Agentic-Tx tool use frequencies for chemical preference and HLE benchmarks.", + "* Figure S.15 Agentic-Tx tool use frequency per question for chemical preference questions." 
+ ], + "bbox": [ + 200, + 343, + 864, + 373 + ], + "page_idx": 23 + }, + { + "type": "text", + "text": "- Proof-of-concept example using TxGemma (Section D.4)", + "bbox": [ + 169, + 376, + 594, + 391 + ], + "page_idx": 23 + }, + { + "type": "text", + "text": "* Figure S.3 Illustration of a possible application of TxGemma to end-to-end therapeutic development.", + "bbox": [ + 200, + 393, + 883, + 424 + ], + "page_idx": 23 + }, + { + "type": "page_number", + "text": "24", + "bbox": [ + 862, + 936, + 883, + 948 + ], + "page_idx": 23 + }, + { + "type": "text", + "text": "B Data details", + "text_level": 1, + "bbox": [ + 109, + 90, + 261, + 104 + ], + "page_idx": 24 + }, + { + "type": "text", + "text": "This section provides a breakdown of the tasks used in our study, including information on excluded tasks and the size of training, validation, and test sets for binary classification, regression, and generation tasks.", + "bbox": [ + 109, + 119, + 883, + 151 + ], + "page_idx": 24 + }, + { + "type": "text", + "text": "As previously mentioned, we excluded a small number of tasks from TDC for various reasons. Table S.1 provides an overview of the excluded tasks and the rationale behind their exclusion. The primary reasons for exclusion were the tasks' relevance to the study, limitations of LLMs, and specific data characteristics, such as the absence of clear metrics or redundancy. For instance, tasks like QM7b, QM8, and QM9, which focus on predicting quantum properties, were not directly relevant to the study's focus on therapeutic development. Similarly, IEDB Jespersen and PDB Jespersen were excluded due to their small size and the complexity of implementing token prediction, as opposed to binary classification, within an LLM framework. Tasks such as DrugBank DDI, TWOSIDES, and USPTO Catalyst posed challenges due to the large number of potential labels, making them difficult for LLMs to process effectively. 
MOSES, ZINC, and ChEMBL were excluded because they lacked well-defined evaluation metrics. Finally, USPTO 50K and USPTO Reaction were excluded as they either overlapped with or were subsets of the USPTO task.", + "bbox": [ + 109, + 155, + 885, + 323 + ], + "page_idx": 24 + }, + { + "type": "text", + "text": "Tables S.2 and S.3 specify the number of samples in the training, validation, and test sets for the included binary classification, regression, and generation tasks, respectively. Substantial variability in task sizes across different tasks is shown in these tables. The binary classification tasks range from 196 to 1,406,988 samples, while the regression and generation tasks range from 345 to 775,767 samples. This variability highlights the diverse data availability landscape across various tasks. Figure S.1 provides a visual representation of the distribution of TDC task sizes, aggregated across train, validation, and test sets. For tasks encompassing multiple subtasks, like ToxCast, the task size is computed by summing the sizes of each individual dataset.", + "bbox": [ + 109, + 328, + 883, + 435 + ], + "page_idx": 24 + }, + { + "type": "image", + "img_path": "images/3765c58d0679b30005a3a6896f3f4d67936c40efceb092bbd2821ab8663df4b7.jpg", + "image_caption": [ + "Figure S.1 | Distribution of TDC task sizes, aggregated over train, validation, and test sets. For tasks containing multiple datasets, such as ToxCast which contains data for more than 600 different assays, the task size is calculated by summing over the sizes for each dataset." + ], + "image_footnote": [], + "bbox": [ + 331, + 452, + 668, + 619 + ], + "page_idx": 24 + }, + { + "type": "text", + "text": "Tables S.4 and S.5 provide a brief description of the tasks, as well as the types of inputs (e.g. protein, small molecules, etc.). These tasks are diverse and encompass many different aspects of development. 
Some tasks corresponding to gene-disease association or protein-protein interaction prediction are useful for early-stage development, in order to identify mechanisms of disease and relevant targets. Predictions of antibody affinity, drug-target interaction, high-throughput screening, drug synergy are useful for intermediate development steps that involve proposing candidate therapeutics based on their interaction with a target. Predictions of toxicity, pharmacokinetics, and developability are useful for filtering candidates down based on favorable druglike properties. Predictions of clinical trial outcome, reaction yields, retrosynthesis are useful for late-stage development where understanding the likelihood of clinical trial approval and manufacturing potential are critical. There are also tasks that are highly specific for particular therapeutics types, which include predictions of CRISPR repair, peptide-MHC binding, miRNA-Target interaction, and TCR-epitope binding.", + "bbox": [ + 109, + 703, + 885, + 872 + ], + "page_idx": 24 + }, + { + "type": "text", + "text": "Binary classification tasks always output “(A)” or “(B)”, where “(A)” is a negative answer to the question which is specified in the prompt and “(B)” is a positive answer. Regression tasks output an integer between", + "bbox": [ + 109, + 876, + 883, + 907 + ], + "page_idx": 24 + }, + { + "type": "page_number", + "text": "25", + "bbox": [ + 862, + 936, + 883, + 948 + ], + "page_idx": 24 + }, + { + "type": "text", + "text": "0 and 1000, which can be transformed back into the original task-specific label space. The output of the USPTO generation task is the SMILES string of the predicted molecules. 
Table S.6 lists the different types of inputs in the processed TDC data along with illustrative examples.", + "bbox": [ + 109, + 90, + 887, + 137 + ], + "page_idx": 25 + }, + { + "type": "table", + "img_path": "images/4d2824d594ed2d7abd228cce1d0df9ff221c1c6c2479fa2ed84c0df88c6e7cac.jpg", + "table_caption": [ + "Table S.1 | Excluded TDC tasks and reasons for exclusion. The tasks were excluded primarily due to their relevance to the study, limitations inherent to large language models (LLMs), and specific data characteristics, such as a lack of clear evaluation metrics or redundancy." + ], + "table_footnote": [], + "table_body": "
Task NameReason for Exclusion
QM7bPrediction of quantum properties is not closely related to therapeutic development.
QM8Prediction of quantum properties is not closely related to therapeutic development.
QM9Prediction of quantum properties is not closely related to therapeutic development.
IEDB JespersenAmount of data is small, and token prediction is more difficult to implement in a LLM than binary classification.
PDB JespersenAmount of data is small, and token prediction is more difficult to implement in a LLM than binary classification.
DrugBank DDILarge number of possible labels is difficult to implement in a LLM.
TWOSIDESLarge number of possible labels is difficult to implement in a LLM.
USPTO CatalystLarge number of possible labels is difficult to implement in a LLM.
MOSESNo clear metric.
ZINCNo clear metric.
ChEMBLNo clear metric.
USPTO 50KSubset of USPTO.
USPTO ReactionSame data as USPTO.
", + "bbox": [ + 117, + 207, + 879, + 489 + ], + "page_idx": 25 + }, + { + "type": "page_number", + "text": "26", + "bbox": [ + 862, + 936, + 883, + 949 + ], + "page_idx": 25 + }, + { + "type": "table", + "img_path": "images/b32c8ecb923b6ef6b0bb7d90b88ccffdb27bb8885edf80d5efadf8fd7a85e95f.jpg", + "table_caption": [ + "Table S.2 | Number of samples in training, validation, and test sets for all binary classification tasks. The binary classification tasks range in size from a minimum of 196 samples (Carcinogens Lagunin) to a maximum of 1,406,988 samples (butkiewicz), highlighting the considerable variability in data availability across different tasks. The task type and split type are also indicated following the TDC classification and recommendation." + ], + "table_footnote": [ + "* To predict whether compounds have Anti-HIV properties." + ], + "table_body": "
Task NameTask TypeSplit TypeTraining SizeValidation SizeTest Size
AMESToxicityScaffold5,0937281,457
BBB MartinsPharmacokineticsScaffold1,421203406
Bioavailability MaPharmacokineticsScaffold1,344192384
CYP1A2 VeithPharmacokineticsScaffold8,8051,2572,517
CYP2C19 VeithPharmacokineticsScaffold8,8651,2662,534
CYP2C9 Substrate CarbonMangelsPharmacokineticsScaffold46767135
CYP2C9 VeithPharmacokineticsScaffold8,4631,2102,419
CYP2D6 Substrate CarbonMangelsPharmacokineticsScaffold46567135
CYP2D6 VeithPharmacokineticsScaffold9,1911,3132,626
CYP3A4 Substrate CarbonMangelsPharmacokineticsScaffold46867135
CYP3A4 VeithPharmacokineticsScaffold8,6281,2332,467
Carcinogens LaguninToxicityScaffold1962856
ClinToxToxicityScaffold1,034147297
DILIToxicityScaffold3255496
HIA HouPharmacokineticsScaffold40358117
HIV*High-throughput screeningScaffold28,7884,1128,227
HuRIProtein-protein interactionCold-start45,8559873,694
MHC1 IEDB IMGT NielsenPeptide-MHC bindingRandom130,19018,59837,197
MHC2 IEDB JensenPeptide-MHC bindingRandom93,99713,42826,856
PAMPA NCATSPharmacokineticsScaffold1,423203408
Pgp BrocatelliPharmacokineticsScaffold851122245
SARSCOV2 3CLPro DiamondHigh-throughput screeningScaffold61688176
SARSCoV2 Vitro TouretHigh-throughput screeningScaffold1,038148298
SAbDab ChenDevelopabilityRandom1,686241482
Skin ReactionToxicityScaffold2824082
Tox21ToxicityScaffold54,5567,79015,600
ToxCastToxicityScaffold1,073,279153,099307,282
butkiewiczHigh-throughput screeningRandom1,406,988200,99840,1997
hERGToxicityScaffold45766132
hERG KarimToxicityScaffold9,4111,3442,690
herg centralToxicityScaffold214,82530,68961,379
miRTarBasemiRNA-target interactionRandom559,59179,948159,889
phase1Clinical trial outcomeCold-start1,546258598
phase2Clinical trial outcomeCold-start5,7927161,282
phase3Clinical trial outcomeCold-start41,255321,084
weberTCR-epitope bindingCold-start33,0134,7489,421
", + "bbox": [ + 75, + 239, + 924, + 821 + ], + "page_idx": 26 + }, + { + "type": "page_number", + "text": "27", + "bbox": [ + 862, + 936, + 883, + 948 + ], + "page_idx": 26 + }, + { + "type": "table", + "img_path": "images/0bb72c425ec48ac5375fe73446d4fae9bb535296e49f9ec67b1508fd86755108.jpg", + "table_caption": [ + "Table S.3 | Number of samples in training, validation, and test sets for all regression and generation tasks. The regression and generation tasks vary significantly in size, ranging from a minimum of 345 samples (Protein SAbDab) to a maximum of 775,767 samples (USPTO). The task type and split type are also indicated following the TDC classification and recommendation." + ], + "table_footnote": [], + "table_body": "
Task NameTask TypeSplit TypeTraining SizeValidation SizeTest Size
BindingDB PatentDrug-target interactionTemporal146,80036,63049,028
BindingDB ic50Drug-target interactionCold-start375,1277,53131,495
BindingDB kdDrug-target interactionCold-start19,0343762,321
BindingDB kiDrug-target interactionCold-start57,6561,1894,709
Buchwald HartwigReaction yieldsRandom2,768396791
Caco2 WangPharmacokineticsScaffold63791182
Clearance Hepatocyte AZPharmacokineticsScaffold848122243
Clearance Microsome AZPharmacokineticsScaffold770111221
DAVISDrug-target interactionCold-start12,4552661,064
DisGeNETGene-disease associationRandom39,4255,62111,200
DrugComb BlissDrug synergyCombination207,77229,61859,708
DrugComb CSSDrug synergyCombination207,77229,61859,708
DrugComb HSADrug synergyCombination207,77229,61859,708
DrugComb LoeweDrug synergyCombination207,77229,61859,708
DrugComb ZIPDrug synergyCombination207,77229,61859,708
GDSC1Drug responseRandom124,11717,73135,462
GDSC2Drug responseRandom64,8929,27018,541
Half Life ObachPharmacokineticsScaffold46567135
KIBADrug-target interactionCold-start59,3261,0424,524
LD50 ZhuToxicityScaffold5,1687391,478
LeenayCRISPR repairRandom5,3257601,520
Lipophilicity AstraZenecaPharmacokineticsScaffold2,940420840
OncoPolyPharmacologyDrug synergyCombination16,0142,3314,707
PPBR AZPharmacokineticsScaffold1,952279559
Protein SAbDabAntibody affinityRandom3454999
Solubility AqSolDBPharmacokineticsScaffold6,9889981,996
TAPDevelopabilityRandom845120240
USPTORetrosynthesisRandom775,767110,824221,648
USPTO YieldsReaction yieldsRandom597,54685,364170,728
VDss LombardoPharmacokineticsScaffold791113226
", + "bbox": [ + 107, + 268, + 890, + 805 + ], + "page_idx": 27 + }, + { + "type": "page_number", + "text": "28", + "bbox": [ + 862, + 936, + 883, + 948 + ], + "page_idx": 27 + }, + { + "type": "table", + "img_path": "images/9fb69e97ac4ee81f26e53d78fe24373e98bbbb61c0bd4b22e0baf2713a888d55.jpg", + "table_caption": [ + "Table S.4 | Inputs and task descriptions for binary classification tasks. All output responses are either (A) for negative or (B) for positive." + ], + "table_footnote": [ + "* To predict whether compounds have Anti-HIV properties." + ], + "table_body": "
Task NameInputDescription
AMESSmall moleculeGiven a drug SMILES, predict whether it is mutagenic.
BBB MartinsSmall moleculeGiven a drug SMILES, predict whether it can cross the blood-brain barrier.
Bioavailability MaSmall moleculeGiven a drug SMILES, predict whether it is orally available.
CYP1A2 VeithSmall moleculeGiven a drug SMILES, predict whether it inhibits CYP1A2.
CYP2C19 VeithSmall moleculeGiven a drug SMILES, predict whether it inhibits CYP2C19.
CYP2C9 Substrate CarbonMangelsSmall moleculeGiven a drug SMILES, predict whether it is a substrate to CYP2C9.
CYP2C9 VeithSmall moleculeGiven a drug SMILES, predict whether it inhibits CYP2C9.
CYP2D6 Substrate CarbonMangelsSmall moleculeGiven a drug SMILES, predict whether it is a substrate to CYP2D6.
CYP2D6 VeithSmall moleculeGiven a drug SMILES, predict whether it inhibits CYP2D6.
CYP3A4 Substrate CarbonMangelsSmall moleculeGiven a drug SMILES, predict whether it is a substrate to CYP3A4.
CYP3A4 VeithSmall moleculeGiven a drug SMILES, predict whether it inhibits CYP3A4.
Carcinogens LaguninSmall moleculeGiven a drug SMILES, predict whether it is a carcinogen.
ClinToxSmall moleculeGiven a drug SMILES, predict whether it is toxic.
DILISmall moleculeGiven a drug SMILES, predict whether it can cause liver injury.
HIA HouSmall moleculeGiven a drug SMILES, predict whether it is absorbed in the human intestine.
HIV*Small moleculeGiven a drug SMILES, predict whether it has anti-HIV activity.
HuRIProteinGiven the amino acid sequences of two proteins, predict whether the proteins interact.
MHC1 IEDB IMGT NielsenProteinGiven the amino acid of the peptide and pseudo amino acid of MHC 1, predict whether the peptide binds to the MHC.
MHC2 IEDB JensenProteinGiven the amino acid of the peptide and pseudo amino acid of MHC 2, predict whether the peptide binds to the MHC.
PAMPA NCATSSmall moleculeGiven a drug SMILES, predict whether it is permeable in a PAMPA assay.
Pgp BroccatelliSmall moleculeGiven a drug SMILES, predict whether it inhibits Pgp.
SARSCOV2 3CLPro DiamondSmall moleculeGiven a drug SMILES, predict whether it binds SARS-CoV-2 3CL protease.
SARSCOV2 Vitro TouretSmall moleculeGiven a drug SMILES, predict whether it inhibits SARS-CoV-2 replication.
SAbDab ChenProteinGiven an antibody heavy chain and light chain sequence, whether it is developable.
Skin ReactionSmall moleculeGiven a drug SMILES, predict whether it can cause skin reaction.
Tox21Small moleculeGiven a drug SMILES, predict whether it is toxic in various assays.
ToxCastSmall moleculeGiven a drug SMILES, predict whether it is toxic in various assays.
butkiewiczSmall moleculeGiven a drug SMILES, predict whether it is active against various proteins.
hERGSmall moleculeGiven a drug SMILES, predict whether it blocks hERG.
hERG KarimSmall moleculeGiven a drug SMILES, predict whether it inhibits hERG.
herg centralSmall moleculeGiven a drug SMILES, predict whether it inhibits hERG.
miRTarBase phase1Nucleic acid & proteinGiven the miRNA mature and target amino acid, predict whether they interact.
phase2Small molecule & diseaseGiven a drug SMILES and disease, predict whether the phase 1 trial will be approved.
phase3Small molecule & diseaseGiven a drug SMILES and disease, predict whether the phase 2 trial will be approved.
weberSmall molecule & diseaseGiven a drug SMILES and disease, predict whether the phase 3 trial will be approved.
ProteinGiven the amino acid of the epitope and a T-cell receptor (amino acid of the hypervariable CDR3 loop), predict whether the epitope binds to the TCR.
", + "bbox": [ + 86, + 179, + 911, + 848 + ], + "page_idx": 28 + }, + { + "type": "page_number", + "text": "29", + "bbox": [ + 862, + 936, + 883, + 948 + ], + "page_idx": 28 + }, + { + "type": "table", + "img_path": "images/6c106798dfbe627a04549474626d78648a113dce271a2871c66142c409a94aba.jpg", + "table_caption": [ + "Table S.5 | Inputs and task descriptions for regression and generation tasks. Regression task outputs are integers between 0 and 1000, which represents a binned transformation of the original numeric label. On evaluation, the integer output is transformed back into the original numeric label space. For the USPTO generation task, the output is the SMILES string of the predicted set of small molecules." + ], + "table_footnote": [], + "table_body": "
Task NameInputDescription
BindingDB PatentProtein & small moleculeGiven the target amino acid and drug SMILES, predict their binding affinity.
BindingDB ic50ProteinGiven the target amino acid and drug SMILES, predict their IC50.
BindingDB kdProteinGiven the target amino acid and drug SMILES, predict their Kd.
BindingDB kiProteinGiven the target amino acid and drug SMILES, predict their Ki.
Buchwald HartwigSmall moleculeGiven a product, a catalyst, and a reactant SMILES, predict the reaction yield.
Caco2 WangSmall moleculeGiven a drug SMILES, predict the cell effective permeability.
Clearance Hepatocyte AZSmall moleculeGiven a drug SMILES, predict the activity of hepatocyte clearance.
Clearance Microsome AZSmall moleculeGiven a drug SMILES, predict the activity of microsome clearance.
DAVISProtein & small moleculeGiven the target amino acid and drug SMILES, predict their binding affinity.
DisGeNETProtein & diseaseGiven the disease description and the amino acid of the gene, predict their association.
DrugComb BlissSmall molecule & cell lineGiven two drug SMILESs and a cell line description, predict the drug synergy level.
DrugComb CSSSmall molecule & cell lineGiven two drug SMILESs and a cell line description, predict the drug synergy level.
DrugComb HSASmall molecule & cell lineGiven two drug SMILESs and a cell line description, predict the drug synergy level.
DrugComb LoeweSmall molecule & cell lineGiven two drug SMILESs and a cell line description, predict the drug synergy level.
DrugComb ZIPSmall molecule & cell lineGiven two drug SMILESs and a cell line description, predict the drug synergy level.
GDSC1Small molecule & cell lineGiven a drug SMILES and a cell line description, predict the drug sensitivity level.
GDSC2Small molecule & cell lineGiven a drug SMILES and a cell line description, predict the drug sensitivity level.
Half Life ObachSmall moleculeGiven a drug SMILES, predict the half life duration.
KIBAProtein & small moleculeGiven the target amino acid and drug SMILES, predict their binding affinity.
LD50 ZhuSmall moleculeGiven a drug SMILES, predict its LD50 toxicity.
LeenayNucleic acidGiven a GuideSeq sequence, predict various properties.
Lipophilicity AstraZenecaSmall moleculeGiven a drug SMILES, predict the lipophilicity.
OncoPolyPharmacologyCell line & small moleculeGiven two drug SMILESs and a cell line description, predict the drug synergy level.
PPBR AZSmall moleculeGiven a drug SMILES, predict the plasma protein binding rate.
Protein SAbDabProteinGiven the amino acid of the antibody and antigen, predict the binding affinity.
Solubility AqSolDBSmall moleculeGiven a drug SMILES, predict the activity of solubility.
TAPProteinGiven an antibody heavy chain and light chain sequence, predict its CDR length.
USPTOSmall moleculeGiven the product SMILES, generate the reactant SMILESs.
USPTO YieldsSmall moleculeGiven a catalyst SMILES, reactant SMILES, and product SMILES, predict the yield.
VDss LombardoSmall moleculeGiven a drug SMILES, predict the volume of distributon.
", + "bbox": [ + 76, + 236, + 921, + 839 + ], + "page_idx": 29 + }, + { + "type": "page_number", + "text": "30", + "bbox": [ + 862, + 936, + 883, + 948 + ], + "page_idx": 29 + }, + { + "type": "table", + "img_path": "images/d7dcc741662b146f89dbd3da83f6492c95c1c43ce8fb40ae16b5fbee3c7eaae9.jpg", + "table_caption": [ + "Table S.6 | Types of drugs and targets found in our data. Features found in our data as well as their textual representation and an illustrative example. Protein sequences are divided into several subtypes: some proteins and peptides are represented using their full amino acid sequence whereas MHC molecules are represented using the amino acid pseudo-sequences that only use residues in contact with a peptide, and TCRs only use CDR3 hypervariable loops." + ], + "table_footnote": [ + "† Only for residues in contact with a peptide." + ], + "table_body": "
Representation TypeRepresentationExample
Small MoleculesSMILES stringCN1C(=O)CN=C(C2=CCCCC2)c2cc(Cl)ccc21
Amino Acid: Proteins and peptidesAmino acid sequencesQLADETLLKV
Amino Acid: MHC moleculesPseudo-sequences †YFAMYGEKVAHTHVDTLYVRYHYYTWAEWAYTWY
Amino Acid: T cell receptorsCDR3 hypervariable loopsCSASEGTSSYEQYF
Nucleic acidNucleotide sequenceACAGCCCAGCAGUUUAUCACGGG
DiseaseEnglish textChronic myeloproliferative disease
Cell LineEnglish textNU-1, stomach cell sourced from cancer
", + "bbox": [ + 117, + 465, + 879, + 589 + ], + "page_idx": 30 + }, + { + "type": "page_number", + "text": "31", + "bbox": [ + 862, + 936, + 882, + 948 + ], + "page_idx": 30 + }, + { + "type": "text", + "text": "C Method details", + "text_level": 1, + "bbox": [ + 109, + 89, + 289, + 104 + ], + "page_idx": 31 + }, + { + "type": "text", + "text": "This section elaborates on the modeling choices employed in the development of TxGemma. Tables S.7 and S.8 illustrate prompts used for binary classification, regression, and generation tasks, showcasing the input structure for the model including the instructions and context provided to the model. Table S.9 provide a concrete example of few-shot prompting applied to a binary classification task using 10 examples with nearest-neighbor shots. Each dataset in our data is structured as a text prompt, consisting of instructions, context, a question, and the corresponding answer. To provide relevant background, we created 2-3 sentence contexts based on TDC dataset descriptions and literature searches. Prompts used for predicting adverse events in clinical trials based on the TrialBench dataset [1] are shown in Table S.10. To illustrate the reasoning process of Agentic-Tx, Table S.11 provides an example of the steps taken to answer a chemical preference question from ChemBench. Table S.12 also provides a comprehensive list of the tools available of Agentic-Tx. Section C.1 provides details of the Wilcoxon signed-rank test used to assess the performance of our models across all tasks.", + "bbox": [ + 107, + 119, + 888, + 303 + ], + "page_idx": 31 + }, + { + "type": "text", + "text": "We utilize random data points from the training set for few-shot learning during training. 
Although we use nearest neighbor shots for evaluation, we opt for random shots during training due to the higher intra-set similarity observed within the training data compared to between training and test sets, as illustrated in Figure S.2.", + "bbox": [ + 107, + 308, + 887, + 369 + ], + "page_idx": 31 + }, + { + "type": "image", + "img_path": "images/a97b57e1e6b69f48df9b46566fd0c5232a634bcaa152ee1d7db532c7273836dd.jpg", + "image_caption": [ + "Figure S.2 | Distribution of the Tanimoto similarities for the 10 nearest neighbors in the AMES task. Nearest neighbors are calculated from the training set for training and validation sets, and from both the training and validation sets for the test set." + ], + "image_footnote": [], + "bbox": [ + 310, + 386, + 687, + 570 + ], + "page_idx": 31 + }, + { + "type": "text", + "text": "C.1 Aggregated method comparison", + "text_level": 1, + "bbox": [ + 109, + 660, + 439, + 678 + ], + "page_idx": 31 + }, + { + "type": "text", + "text": "For a pair of performances $(x_{i},y_{i})$ of a task $i$ , the test statistic of the Wilcoxon signed-rank test is calculated as the minimum of the positive-rank sum $(W^{+})$ and the negative-rank sum $(W^{-})$ ,", + "bbox": [ + 107, + 686, + 887, + 719 + ], + "page_idx": 31 + }, + { + "type": "equation", + "text": "\n$$\nW ^ {+} = \\sum_ {X _ {i} > 0} R _ {i} \\tag {1}\n$$\n", + "text_format": "latex", + "bbox": [ + 442, + 734, + 885, + 767 + ], + "page_idx": 31 + }, + { + "type": "equation", + "text": "\n$$\nW ^ {-} = \\sum_ {X _ {i} < 0} R _ {i} \\tag {2}\n$$\n", + "text_format": "latex", + "bbox": [ + 442, + 782, + 885, + 816 + ], + "page_idx": 31 + }, + { + "type": "text", + "text": "where $X_{i} = x_{i} - y_{i}$ and $R_{i}$ is the rank of $|x_{i} - y_{i}|$ . In order to account for the differences in magnitudes for MAE and MSE metrics, we normalized all performances by the mean of the performances from both models. 
We also reversed the sign of MAEs and MSEs because lower MAEs and MSEs correspond to better performances.", + "bbox": [ + 107, + 828, + 887, + 888 + ], + "page_idx": 31 + }, + { + "type": "page_number", + "text": "32", + "bbox": [ + 862, + 936, + 885, + 949 + ], + "page_idx": 31 + }, + { + "type": "text", + "text": "Table S.7 | Example of prompts for binary classification tasks.", + "bbox": [ + 111, + 150, + 527, + 164 + ], + "page_idx": 32 + }, + { + "type": "text", + "text": "Instructions: Answer the following question about drug properties.", + "bbox": [ + 124, + 186, + 545, + 199 + ], + "page_idx": 32 + }, + { + "type": "text", + "text": "Context: As a membrane separating circulating blood and brain extracellular fluid, the blood-brain barrier (BBB) is the protection layer that blocks most foreign drugs. Thus the ability of a drug to penetrate the barrier to deliver to the site of action forms a crucial challenge in development of drugs for central nervous system.", + "bbox": [ + 122, + 199, + 872, + 238 + ], + "page_idx": 32 + }, + { + "type": "text", + "text": "Question: Given a drug SMILES string, predict whether it", + "bbox": [ + 124, + 239, + 493, + 251 + ], + "page_idx": 32 + }, + { + "type": "text", + "text": "(A) does not cross the BBB (B) crosses the BBB", + "bbox": [ + 125, + 252, + 426, + 263 + ], + "page_idx": 32 + }, + { + "type": "text", + "text": "Drug SMILES: $\\mathrm{CN1C(=O)CN = C(C2 = CCCCC2)c2cc(Cl)ccc21}$", + "bbox": [ + 125, + 265, + 511, + 277 + ], + "page_idx": 32 + }, + { + "type": "text", + "text": "Answer: (B)", + "bbox": [ + 125, + 279, + 215, + 291 + ], + "page_idx": 32 + }, + { + "type": "text", + "text": "Instructions: Answer the following question about peptide-MHC binding.", + "bbox": [ + 124, + 314, + 584, + 325 + ], + "page_idx": 32 + }, + { + "type": "text", + "text": "Context: In the human body, T cells monitor the existing peptides and trigger an immune response if the peptide is foreign. 
To decide whether or not if the peptide is not foreign, the peptide must bind to a major histocompatibility complex (MHC) molecule. Therefore, predicting peptide-MHC binding affinity is pivotal for determining immunogenicity. In some experiments, the peptide binding is measured against cells that express multiple MHCs, so the peptide could be binding any one of the possible MHCs. Class 1 MHC molecules bind to peptides that are usually 8-14 amino acids long and activate CD8 T cells.", + "bbox": [ + 122, + 327, + 870, + 404 + ], + "page_idx": 32 + }, + { + "type": "text", + "text": "Question: Given the amino acid sequence of the peptide and possible pseudo amino acid sequences of MHC 1, predict whether the peptide", + "bbox": [ + 122, + 406, + 870, + 431 + ], + "page_idx": 32 + }, + { + "type": "text", + "text": "(A) does not bind to any of the MHCs (B) binds to any of the MHCs", + "bbox": [ + 124, + 431, + 550, + 445 + ], + "page_idx": 32 + }, + { + "type": "text", + "text": "Peptide amino acid sequence: QLADETLLKV", + "bbox": [ + 125, + 446, + 411, + 458 + ], + "page_idx": 32 + }, + { + "type": "text", + "text": "Possible MHC pseudosequences: YFAMYGEKAVTHVDTLYVRYHYTTYEAWAYTWY", + "bbox": [ + 125, + 458, + 679, + 470 + ], + "page_idx": 32 + }, + { + "type": "text", + "text": "Answer: (B)", + "bbox": [ + 125, + 472, + 215, + 484 + ], + "page_idx": 32 + }, + { + "type": "text", + "text": "Instructions: Answer the following question about miRNA protein interactions.", + "bbox": [ + 124, + 507, + 620, + 518 + ], + "page_idx": 32 + }, + { + "type": "text", + "text": "Context: MicroRNAs (miRNAs) are, small non-coding RNAs with 18-25 nucleotides, which are central regulators at the post-transcriptional level in both animals and plants. 
Perfect or near-perfect complementary binding of miRNAs and their target mRNA negatively regulates gene expression by accelerating mRNA degradation or suppressing mRNA translation.", + "bbox": [ + 122, + 520, + 870, + 559 + ], + "page_idx": 32 + }, + { + "type": "text", + "text": "Question: Given the miRNA mature sequence and target amino acid sequence, predict whether", + "bbox": [ + 124, + 560, + 715, + 571 + ], + "page_idx": 32 + }, + { + "type": "text", + "text": "(A) the miRNA and target do not interact (B) the miRNA and target interact", + "bbox": [ + 124, + 573, + 604, + 585 + ], + "page_idx": 32 + }, + { + "type": "text", + "text": "miRNA sequence: UUCCUGUCAGCCGUGGGUGCC", + "bbox": [ + 125, + 585, + 460, + 598 + ], + "page_idx": 32 + }, + { + "type": "text", + "text": "Target amino acid sequence: MSVNMDELRHQVMINQFVLAAGCAADQAKQLLQAAHWQFETALSTFFQET-NIPNSHHHHQMMCTPSNTPATPPNFPDALAMFSKLRASEGLQSSNSPMTAAACSPANFSPFWASSPPSHQAPWIP-PSSPTTFHLHRPQPTWPPGAQQGGAQQKAMAAMDGQR", + "bbox": [ + 122, + 599, + 870, + 637 + ], + "page_idx": 32 + }, + { + "type": "text", + "text": "Answer: (A)", + "bbox": [ + 125, + 638, + 215, + 651 + ], + "page_idx": 32 + }, + { + "type": "text", + "text": "Instructions: Answer the following question about clinical trials.", + "bbox": [ + 124, + 674, + 529, + 686 + ], + "page_idx": 32 + }, + { + "type": "text", + "text": "Context: Clinical trial is the most time and cost-consuming step in the drug discovery process. Phase 1 clinical trials test the safety and basic properties of a new drug or treatment in a small group of people for the first time. Optimizing and designing trials with machine learning could drastically lead to the speedup of delivery of life-saving therapeutics to patients. Clinical trial outcome prediction is a machine learning task that aims to forecast the outcome of clinical trials, such as the approval rate of a drug or treatment. 
It utilizes various clinical trial features, including the drug's molecular structure and patient disease.", + "bbox": [ + 122, + 686, + 870, + 765 + ], + "page_idx": 32 + }, + { + "type": "text", + "text": "Question: Given a drug SMILES string and disease, predict if the phase 1 trial", + "bbox": [ + 124, + 766, + 616, + 779 + ], + "page_idx": 32 + }, + { + "type": "text", + "text": "(A) would not be approved (B) would be approved", + "bbox": [ + 125, + 780, + 436, + 792 + ], + "page_idx": 32 + }, + { + "type": "text", + "text": "Drug SMILES: COC1=NC(N)=NC2=C1N=CN2[C@@H]1O[C@H](CO)[C@@H](O)[C@@H]1O", + "bbox": [ + 125, + 792, + 702, + 806 + ], + "page_idx": 32 + }, + { + "type": "text", + "text": "Disease: Chronic myeloproliferative disease", + "bbox": [ + 125, + 806, + 390, + 818 + ], + "page_idx": 32 + }, + { + "type": "text", + "text": "Answer: (A)", + "bbox": [ + 125, + 819, + 215, + 832 + ], + "page_idx": 32 + }, + { + "type": "page_number", + "text": "33", + "bbox": [ + 862, + 936, + 883, + 948 + ], + "page_idx": 32 + }, + { + "type": "text", + "text": "Table S.8 | Example of prompts for regression and generation tasks.", + "bbox": [ + 111, + 176, + 565, + 191 + ], + "page_idx": 33 + }, + { + "type": "text", + "text": "Instructions: Answer the following question about drug properties.", + "bbox": [ + 124, + 213, + 545, + 224 + ], + "page_idx": 33 + }, + { + "type": "text", + "text": "Context: The human colon epithelial cancer cell line, Caco-2, is used as an in vitro model to simulate the human intestinal tissue. 
The experimental result on the rate of drug passing through the Caco-2 cells can approximate the rate at which the drug permeates through the human intestinal tissue.", + "bbox": [ + 122, + 226, + 870, + 263 + ], + "page_idx": 33 + }, + { + "type": "text", + "text": "Question: Given a drug SMILES string, predict its normalized Caco-2 cell effective permeability from 000 to 1000, where 000 is minimum permeability and 1000 is maximum permeability.", + "bbox": [ + 122, + 266, + 870, + 291 + ], + "page_idx": 33 + }, + { + "type": "text", + "text": "Drug SMILES: $\\mathrm{O} = \\mathrm{C}(\\mathrm{O})\\mathrm{{COC}}\\left( { = \\mathrm{O}}\\right) \\mathrm{{Cc}}1\\text{ccc}\\mathrm{{cc}}1\\mathrm{{Nc}}1\\mathrm{{c}}\\left( \\mathrm{{Cl}}\\right) \\mathrm{{ccc}}1\\mathrm{{Cl}}$", + "bbox": [ + 125, + 292, + 500, + 304 + ], + "page_idx": 33 + }, + { + "type": "text", + "text": "Answer: 788", + "bbox": [ + 125, + 305, + 215, + 316 + ], + "page_idx": 33 + }, + { + "type": "text", + "text": "Instructions: Answer the following question about drug responses.", + "bbox": [ + 124, + 340, + 540, + 353 + ], + "page_idx": 33 + }, + { + "type": "text", + "text": "Context: The same drug compound could have various levels of responses in different patients. To design drug for individual or a group with certain characteristics is the central goal of precision medicine. 
In experiments, IC50s of drugs were measured against cancer cell lines.", + "bbox": [ + 122, + 354, + 870, + 392 + ], + "page_idx": 33 + }, + { + "type": "text", + "text": "Question: Given a drug SMILES string and a cell line description, predict the normalized drug sensitivity from 000 to 1000, where 000 is minimum drug sensitivity and 1000 is maximum drug sensitivity.", + "bbox": [ + 122, + 393, + 872, + 417 + ], + "page_idx": 33 + }, + { + "type": "text", + "text": "Drug SMILES: $\\mathrm{CN1C = C(C2 = CC = CC = C21) / C = C\\backslash 3 / C4 = C(C = CC = N4)NC3 = O}$", + "bbox": [ + 124, + 419, + 630, + 431 + ], + "page_idx": 33 + }, + { + "type": "text", + "text": "Cell line description: SNU-1, stomach cell sourced from cancer", + "bbox": [ + 124, + 433, + 509, + 445 + ], + "page_idx": 33 + }, + { + "type": "text", + "text": "Answer: 615", + "bbox": [ + 125, + 446, + 215, + 457 + ], + "page_idx": 33 + }, + { + "type": "text", + "text": "Instructions: Answer the following question about drug target interactions.", + "bbox": [ + 124, + 481, + 594, + 493 + ], + "page_idx": 33 + }, + { + "type": "text", + "text": "Context: Drug-target binding is the physical interaction between a drug and a specific biological molecule, such as a protein or enzyme. This interaction is essential for the drug to exert its pharmacological effect. The strength of the drug-target binding is determined by the binding affinity, which is a measure of how tightly the drug binds to the target. Kd is the dissociation constant of a drug-target complex. It is the concentration of drug at which half of the drug-target complexes have dissociated. 
A lower Kd value indicates a stronger binding affinity.", + "bbox": [ + 122, + 494, + 870, + 559 + ], + "page_idx": 33 + }, + { + "type": "text", + "text": "Question: Given the target amino acid sequence and compound SMILES string, predict their normalized binding affinity Kd from 000 to 1000, where 000 is minimum Kd and 1000 is maximum Kd.", + "bbox": [ + 122, + 560, + 870, + 584 + ], + "page_idx": 33 + }, + { + "type": "text", + "text": "Drug SMILES: $\\mathrm{O = S(=O)(O)c1cccc2ccc(Nc3cccccc)3c12}$", + "bbox": [ + 124, + 585, + 468, + 598 + ], + "page_idx": 33 + }, + { + "type": "text", + "text": "Target amino acid sequence: MATVQQLEGRWRLVDSKGFDEYMKELGVIALRKMGAMKPDCIITCDGKNLTIKTESTLKITTQFSCTLGEKFETTADGRKTQTVCNFTDGALVHQWEWDGKESTITRKLKDGLVVECVMNNVTCTRIYEKVE", + "bbox": [ + 122, + 599, + 872, + 637 + ], + "page_idx": 33 + }, + { + "type": "text", + "text": "Answer: 397", + "bbox": [ + 125, + 638, + 215, + 650 + ], + "page_idx": 33 + }, + { + "type": "text", + "text": "Instructions: Answer the following question about reactions.", + "bbox": [ + 124, + 674, + 506, + 686 + ], + "page_idx": 33 + }, + { + "type": "text", + "text": "Context: Retrosynthesis is the process of finding a set of reactants that can synthesize a target molecule, i.e., product, which is a fundamental task in drug manufacturing. The target is recursively transformed into simpler precursor molecules until commercially available \"starting\" molecules are identified. 
In a data sample, there is only one product molecule, reactants can be one or multiple molecules.", + "bbox": [ + 122, + 686, + 870, + 739 + ], + "page_idx": 33 + }, + { + "type": "text", + "text": "Question: Given a product SMILES string, predict the reactant SMILES string.", + "bbox": [ + 124, + 739, + 622, + 753 + ], + "page_idx": 33 + }, + { + "type": "text", + "text": "Product SMILES: [CH2:12]1[C:7]2([CH2:6][CH2:5][O:15][CH2:1][CH2:8]2)[CH2:13][CH2:14][O:10][C:11]1=[O:17]", + "bbox": [ + 124, + 753, + 803, + 766 + ], + "page_idx": 33 + }, + { + "type": "text", + "text": "Answer: [CH:1]12B[CH:5]([CH2:6][CH2:7][CH2:8]1)CCC2.[O:10]1[CH2:14][CH2:13][CH2:12] [CH2:11]1.[OH:15].[Na+].[OH:17]O.CI", + "bbox": [ + 124, + 766, + 872, + 792 + ], + "page_idx": 33 + }, + { + "type": "page_number", + "text": "34", + "bbox": [ + 862, + 936, + 883, + 948 + ], + "page_idx": 33 + }, + { + "type": "text", + "text": "Table S.9 | Example of a 10-shot prompt for a binary classification task. Shots are selected from nearest neighbors in the combined training and validation set (not the test set).", + "bbox": [ + 109, + 212, + 885, + 244 + ], + "page_idx": 34 + }, + { + "type": "text", + "text": "Instructions: Answer the following question about drug properties.", + "bbox": [ + 125, + 263, + 545, + 277 + ], + "page_idx": 34 + }, + { + "type": "text", + "text": "Context: As a membrane separating circulating blood and brain extracellular fluid, the blood-brain barrier (BBB) is the protection layer that blocks most foreign drugs. 
Thus the ability of a drug to penetrate the barrier to deliver to the site of action forms a crucial challenge in development of drugs for central nervous system.", + "bbox": [ + 124, + 277, + 872, + 316 + ], + "page_idx": 34 + }, + { + "type": "text", + "text": "Question: Given a drug SMILES string, predict whether it (A) does not cross the BBB (B) crosses the BBB", + "bbox": [ + 125, + 329, + 795, + 344 + ], + "page_idx": 34 + }, + { + "type": "text", + "text": "Drug SMILES: $\\mathrm{CN1C(=O)CN = C(c2cccccc)2c2cc(Cl)ccc21}$", + "bbox": [ + 125, + 351, + 478, + 364 + ], + "page_idx": 34 + }, + { + "type": "text", + "text": "Answer: (B)", + "bbox": [ + 125, + 366, + 204, + 378 + ], + "page_idx": 34 + }, + { + "type": "text", + "text": "Drug SMILES: $\\mathrm{CN1C(=O)CN = C(c2cccccc2F)c2cc(Cl)ccc21}$", + "bbox": [ + 125, + 391, + 486, + 405 + ], + "page_idx": 34 + }, + { + "type": "text", + "text": "Answer: (B)", + "bbox": [ + 125, + 406, + 204, + 417 + ], + "page_idx": 34 + }, + { + "type": "text", + "text": "Drug SMILES: $\\mathrm{CN1C(=S)CN = C(c2cccccc)2c2cc(Cl)ccc21}$", + "bbox": [ + 125, + 430, + 473, + 444 + ], + "page_idx": 34 + }, + { + "type": "text", + "text": "Answer: (B)", + "bbox": [ + 125, + 445, + 204, + 457 + ], + "page_idx": 34 + }, + { + "type": "text", + "text": "Drug SMILES: CP(C)(=O)CN1C(=O)CN=C(c2cccccc2)c2cc(Cl)ccc21", + "bbox": [ + 125, + 469, + 550, + 484 + ], + "page_idx": 34 + }, + { + "type": "text", + "text": "Answer: (B)", + "bbox": [ + 125, + 484, + 204, + 497 + ], + "page_idx": 34 + }, + { + "type": "text", + "text": "Drug SMILES: $\\mathrm{CN1C(=O)CN = C(c2cccccc)2c2cc([N + ](=O)[O - ])ccc21}$", + "bbox": [ + 125, + 508, + 547, + 523 + ], + "page_idx": 34 + }, + { + "type": "text", + "text": "Answer: (B)", + "bbox": [ + 125, + 523, + 204, + 536 + ], + "page_idx": 34 + }, + { + "type": "text", + "text": "Drug SMILES: CCN(CC)CCN1C(=O)CN=C(c2cccccc2F)c2cc(Cl)ccc21", + "bbox": [ + 125, + 549, + 557, + 563 + ], + "page_idx": 34 + }, + { 
+ "type": "text", + "text": "Answer: (B)", + "bbox": [ + 125, + 564, + 204, + 575 + ], + "page_idx": 34 + }, + { + "type": "text", + "text": "Drug SMILES: $\\mathrm{O} = \\mathrm{C}1\\mathrm{{CN}} = \\mathrm{C}\\left( {c2\\text{ccc}cc2}\\right) c2\\mathrm{{cc}}\\left( \\mathrm{{Cl}}\\right) \\mathrm{{ccc}}2\\mathrm{\\;N}1\\mathrm{{CC}}1\\mathrm{{CC}}1$", + "bbox": [ + 125, + 588, + 509, + 601 + ], + "page_idx": 34 + }, + { + "type": "text", + "text": "Answer: (B)", + "bbox": [ + 125, + 602, + 204, + 614 + ], + "page_idx": 34 + }, + { + "type": "text", + "text": "Drug SMILES: C#CCN1C(=O)CN=C(c2cccc2)c2cc(Cl)ccc21", + "bbox": [ + 125, + 627, + 509, + 641 + ], + "page_idx": 34 + }, + { + "type": "text", + "text": "Answer: (B)", + "bbox": [ + 125, + 642, + 204, + 654 + ], + "page_idx": 34 + }, + { + "type": "text", + "text": "Drug SMILES: $\\mathrm{O} = \\mathrm{C}1\\mathrm{{CN}} = \\mathrm{C}\\left( {\\mathrm{c}2\\text{ccc} : 2}\\right) \\mathrm{c}2\\mathrm{{cc}}\\left( \\mathrm{{Cl}}\\right) \\mathrm{{ccc}}2\\mathrm{\\;N}1\\mathrm{{CC}}\\left( \\mathrm{F}\\right) \\left( \\mathrm{F}\\right) \\mathrm{F}$", + "bbox": [ + 125, + 667, + 526, + 681 + ], + "page_idx": 34 + }, + { + "type": "text", + "text": "Answer: (B)", + "bbox": [ + 125, + 681, + 204, + 694 + ], + "page_idx": 34 + }, + { + "type": "text", + "text": "Drug SMILES: $\\mathrm{CCS}(\\mathrm{=O})(\\mathrm{=O})\\mathrm{CCN1C}(\\mathrm{=O})\\mathrm{CN} = \\mathrm{C}(\\mathrm{c2cccccc2F})\\mathrm{c2cc(Cl)ccc21}$", + "bbox": [ + 125, + 707, + 588, + 720 + ], + "page_idx": 34 + }, + { + "type": "text", + "text": "Answer: (B)", + "bbox": [ + 125, + 720, + 204, + 733 + ], + "page_idx": 34 + }, + { + "type": "text", + "text": "Drug SMILES: $\\mathrm{CN1C(=O)CN = C(C2 = CCCCCC2)c2cc(Cl)ccc21}$", + "bbox": [ + 125, + 742, + 511, + 756 + ], + "page_idx": 34 + }, + { + "type": "text", + "text": "Answer: (B)", + "bbox": [ + 125, + 757, + 215, + 768 + ], + "page_idx": 34 + }, + { + "type": "page_number", + "text": "35", + 
"bbox": [ + 862, + 936, + 883, + 948 + ], + "page_idx": 34 + }, + { + "type": "text", + "text": "Table S.10 | Example of prompts for predicting adverse events in clinical trials. The top prompt only provides drug SMILES strings while the bottom prompt also includes textual information about the clinical trial.", + "bbox": [ + 109, + 306, + 885, + 335 + ], + "page_idx": 35 + }, + { + "type": "text", + "text": "From the following information about a clinical trial, predict whether it would have an adverse event.", + "bbox": [ + 125, + 347, + 745, + 361 + ], + "page_idx": 35 + }, + { + "type": "code", + "sub_type": "code", + "code_caption": [], + "code_body": "Drug: CC[C@H]1[C@@H](COC2=C3C=C(OC)C(=CC3=CC=N2)C(N)=O)NC(=O)[C@H]1F.[H][C@@]12CC[C@H](O)[C@@]1(C)CC[C@]1([H])C3=C(CC[C@@]21[H])C=C(O)C=C3", + "guess_lang": "javascript", + "bbox": [ + 125, + 373, + 702, + 401 + ], + "page_idx": 35 + }, + { + "type": "text", + "text": "Answer: No", + "bbox": [ + 125, + 414, + 210, + 426 + ], + "page_idx": 35 + }, + { + "type": "text", + "text": "From the following information about a clinical trial, predict whether it would have an adverse event.", + "bbox": [ + 124, + 465, + 745, + 479 + ], + "page_idx": 35 + }, + { + "type": "text", + "text": "Title: A Study To Estimate The Effect of PF-06650833 On The Pharmacokinetics (PK) of Oral Contraceptive (OC) Summary: This is a Phase 1, open label, fixed sequence study of the effect of multiple dose PF-06650833 on single dose OC PK in healthy female subjects.", + "bbox": [ + 124, + 492, + 872, + 532 + ], + "page_idx": 35 + }, + { + "type": "text", + "text": "Phase: 1", + "bbox": [ + 125, + 532, + 183, + 542 + ], + "page_idx": 35 + }, + { + "type": "text", + "text": "Disease: Healthy", + "bbox": [ + 125, + 545, + 232, + 556 + ], + "page_idx": 35 + }, + { + "type": "text", + "text": "Minimum age: 18 Years", + "bbox": [ + 125, + 559, + 274, + 570 + ], + "page_idx": 35 + }, + { + "type": "text", + "text": "Maximum age: 60 Years", + "bbox": 
[ + 125, + 571, + 276, + 584 + ], + "page_idx": 35 + }, + { + "type": "text", + "text": "Healthy volunteers: Accepts Healthy Volunteers", + "bbox": [ + 125, + 585, + 419, + 597 + ], + "page_idx": 35 + }, + { + "type": "text", + "text": "Interventions: $400\\mathrm{mg}$ by mouth (PO) Once daily (QD) for 11 days; Single dose of Oral tablet containing 30 ug EE and 150 ug of LN", + "bbox": [ + 125, + 598, + 870, + 623 + ], + "page_idx": 35 + }, + { + "type": "code", + "sub_type": "code", + "code_caption": [], + "code_body": "Drug: CC[C@H]1[C@@H](COC2=C3C=C(OC)C(=CC3=CC=N2)C(N)=O)NC(=O)[C@H]1F.[H][C@@]12CC[C@H](O)[C@@]1(C)CC[C@]1([H])C3=C(CC[C@@]21[H])C=C(O)C=C3", + "guess_lang": "javascript", + "bbox": [ + 125, + 625, + 700, + 651 + ], + "page_idx": 35 + }, + { + "type": "text", + "text": "Answer: No", + "bbox": [ + 125, + 662, + 210, + 675 + ], + "page_idx": 35 + }, + { + "type": "page_number", + "text": "36", + "bbox": [ + 862, + 936, + 883, + 948 + ], + "page_idx": 35 + }, + { + "type": "table", + "img_path": "images/b9d2298fd6339b4d44d87216064cb18663a965c2d62e1de82868a8b0f2abce73.jpg", + "table_caption": [ + "Table S.11 | Example of Agentic-Tx ChemBench chemical preference question answering." + ], + "table_footnote": [], + "table_body": "
Example of ChemBench Chemical Preference
QuestionConsider the following two drug candidates: \nA. N#Cclc(NC(=O)c2cc([N+](=O)[O-])ccc2N2CCOCC2)sc2c1CCCC2 \nB. O=C(C=Cclc00001)c1cccc1. \nWhich is more preferable for further development?
Thought 1First, I need to obtain more information about each drug candidate. I will use the SMILES to Description tool to get detailed descriptions of each molecule from their SMILES strings, starting with candidate.
Action 1SMILES to Description \nInput SMILES: N#Cclc(NC(=O)c2cc([N+](=O)[O-])ccc2N2CCOCC2)sc2c1CCCC2
Observation 1PubChem CID: 3934361 \nMolecular Formula: C20H20N4O4S \nMolecular Weight: 412.5 \nCharge: 0 \nIUPAC Name: N-(3-cyano-4,5,6,7-tetrahydro-1-benzothiophen-2-yl)-2-morpholin-4-yl-5-nitrobenzamide XLogP: 3.8 \nSynonyms: MLS000335194, CHEMBL1549645, HMS2597A10, HMS3379H10, AKOS001044982, SMR000249952, SR-01000056848, SR-01000056848-1, Z27367728 \n(Summarized) The provided information describes a single molecule (PubChem CID 3934361) with a cyano-tetrahydro-benzothiophene core, a morpholino-nitrobenzamide substituent, and an XLogP of 3.8, indicating its lipophilicity.
Thought 2Now, I will use the SMILES to Description tool to get detailed descriptions of candidate B from its SMILES string. After that, I will compare the information obtained for both candidates to determine which is more preferable for further development.
Action 2SMILES to Description \nInput SMILES: O=C(C=Cclc00001)c1cccc1
Observation 2PubChem CID: 219207 \nMolecular Formula: C14H11NO \nMolecular Weight: 209.24 \nCharge: 0 \nIUPAC Name: 1-phenyl-3-pyridin-2-ylprop-2-en-1-one XLogP: 2.7 \nSynonyms: 3-(2-PYRIDYL)-ACRYLOPHENONE, MLS002637493, azachalcone, CHEMBL1717486, DTXSID601279307, HMS3079I05, SMR001547031, 1-Phenyl-3-(2-pyridinyl)-2-propen-1-one \n(Summary) The molecule (B) represented by the SMILES O=C(C=Cclc00001)c1cccc1, also known as 1-phenyl-3-pyridin-2-ylprop-2-en-1-one, has a molecular weight of 209.24 g/mol and a calculated XLogP value of 2.7.
Observation 2
Thought 3Based on the descriptions, candidate B (XLogP = 2.7) is less lipophilic than candidate A (XLogP = 3.8). Lower lipophilicity can often be associated with better absorption and distribution properties. I will now use the ClinicalTox tool to assess the clinical toxicity of candidate B.
Action 3ClinicalTox \nInput SMILES: O=C(C=Cclc00001)c1cccc1
Observation 3Context: Humans are exposed to a variety of chemicals through food, household products, and medicines, some of which can be toxic, leading to over 30% of promising pharmaceuticals failing in human trials due to toxicity. Toxic drugs can be identified from clinical trials that failed due to toxicity, while non-toxic drugs can be identified from FDA approval status or from clinical trials that report no toxicity. Prediction returned: O=C(C=Cclc00001)c1cccc1 is not toxic! \n(Summary) Based on the provided information, drug candidate B (O=C(C=Cclc00001)c1cccc1) is predicted to be non-toxic, suggesting it might be more preferable for further development.
Observation 3
Final ResponseCandidate B is more preferable for further development. Here's why: \n-Lipophilicity (XLogP): Candidate B has a lower XLogP value (2.7) compared to Candidate A (3.8). Lower lipophilicity is generally favored in drug development. Excessive lipophilicity (high XLogP) can lead to problems like poor solubility, increased risk of off-target binding and toxicity, and difficulties in achieving good absorption and distribution in the body. A value closer to the range of many oral drugs is preferred. \n-Toxicity: The clinical toxicity tool states that candidate B is predicted to be non-toxic.
", + "bbox": [ + 112, + 132, + 897, + 897 + ], + "page_idx": 36 + }, + { + "type": "page_number", + "text": "37", + "bbox": [ + 862, + 936, + 883, + 948 + ], + "page_idx": 36 + }, + { + "type": "table", + "img_path": "images/f505e8244b73734017b072226795a6c6f33623dd337bfbd862ee723a1ca44b5e.jpg", + "table_caption": [ + "Table S.12 | Descriptions of tools used by Agentic-Tx." + ], + "table_footnote": [], + "table_body": "
Tool NameDescription
ToxCastUses TxGemma to predict the toxicity of a given drug (SMILES string) in various ToxCast assays based on the provided context. Returns results indicating whether the drug is toxic or not in each selected assay.
ClinicalToxUses TxGemma to predict the clinical toxicity of a given drug (SMILES string) for humans. Returns a result indicating whether the drug is predicted to be toxic or not.
ChatAllows conversational interaction with TxGemma-Chat. Enables posing therapeutics-related questions and receiving responses.
MutagenicityUses TxGemma to predict whether a given drug (SMILES) is mutagenic based on the Ames test. Returns a result indicating if the drug is mutagenic or not.
IC50Uses TxGemma to predict the normalized IC50 between a drug (SMILES) and a target protein (amino acid sequence). Returns a IC50 value, with lower values suggesting potent inhibition.
Phase 1 TrialUses TxGemma to predict the approval outcome of a Phase 1 clinical trial for a drug (SMILES) against a specified disease. Returns a result indicating whether the trial would be approved or not.
Wikipedia SearchSearches Wikipedia for a given text query. Returns the top matching article's title, link, and a short summary.
PubMed SearchQueries PubMed for scientific articles based on a search text. Returns metadata (PMID, title, authors, journal, date, abstract) for the top few articles.
Web SearchPerforms a general web search. Returns titles, links, and snippets for the top search results.
HTML FetchFetched the raw HTML content of a given URL. Useful for inspecting webpage details.
SMILES to DescriptionRetrieves molecular information from PubChem for a given SMILES string. Returns properties like PubChem CID, molecular formula, IUPAC name, XLogP, and synonyms.
SMILES TherapyRetrieves therapeutic information (ChEMBL ID, mechanisms of action, drug indications, ATC classifications) for a drug given its SMILES string.
Molecule ToolProvides molecule-related functions: searching for compounds by name (returns properties and IDs) and converting between molecular representations (InChI, SMILES, InChIKey, Mol).
Molecule ConvertConverts a molecules representation from one type to another (e.g., SMILES to InChI).
Gene SequenceRetrieves amino acid sequences for a given gene name and organism. Searches NCBI Nucleotide, fetches records, and translates DNA to protein sequences.
Gene DescriptionRetrieves descriptive information about a gene from NCBI Gene, including official symbol, full name, description, and summary.
BlastPRuns a BLASTP search against NCBI databases for a given amino acid sequence. Returns hits with gene names, organisms, and accessions.
Protein DescriptionProvides descriptive information (organism, definition, accession) for a protein, either by name or amino acid sequence. Uses NCBI Protein database or BLASTP.
", + "bbox": [ + 120, + 203, + 877, + 824 + ], + "page_idx": 37 + }, + { + "type": "page_number", + "text": "38", + "bbox": [ + 862, + 936, + 883, + 948 + ], + "page_idx": 37 + }, + { + "type": "text", + "text": "D Additional results", + "text_level": 1, + "bbox": [ + 109, + 90, + 316, + 104 + ], + "page_idx": 38 + }, + { + "type": "text", + "text": "D.1 TxGemma-Predict performance", + "text_level": 1, + "bbox": [ + 109, + 122, + 436, + 138 + ], + "page_idx": 38 + }, + { + "type": "text", + "text": "Figure S.4 compares TxGemma-27B-Predict with previous SOTA models, taking into account that Tx-LLM M achieved SOTA performance on many tasks. We provide detailed results tables for binary classification tasks in Table S.13 (comparing against specialist SOTA and base models) and Table S.15 (comparing against TxGemma-Chat and Tx-LLM), and for regression and generation tasks in Table S.14 (comparing against specialist SOTA and base models) and Table S.16 (comparing against TxGemma-Chat and Tx-LLM). Tables S.17 and S.18 list the performances of released TxGemma models trained only on datasets with commercial licenses. Figures S.5 and S.6 compares TxGemma-27B-Predict with LlaSMol and MolE, models specialized for small molecules, on small molecule tasks. Figure S.12 plots the percentage of tasks that contain contaminated datapoints overlapping with the Gemma-2 pretraining data, the percent of contaminated datapoints for these tasks, and Figure S.13 shows the results of TxGemma-27B-Predict after filtering contaminated datapoints out. We observe that most tasks have no contamination, and filtering these datapoints out does not negatively impact TxGemma-27B-Predict performance. Figure S.16 plots performances for particular feature types across multiple model sizes, showing that the integration of SMILES strings and textual information is consistent. 
Figure S.17 plots performances over all tasks for comparisons of model size and domain fine-tuning, showing that these variables are significant. Figure S.18 shows that TxGemma-27B-Predict toxicity and clinical trial approval predictions are correlated, likely because toxicity in an important component of trial approval. Figure S.11 plots the inference speed, normalized by the number of chips used for serving, for all model sizes.", + "bbox": [ + 109, + 148, + 888, + 409 + ], + "page_idx": 38 + }, + { + "type": "text", + "text": "D.2 Conversing with TxGemma-27B-Predict and TxGemma-27B-Chat", + "text_level": 1, + "bbox": [ + 109, + 420, + 743, + 436 + ], + "page_idx": 38 + }, + { + "type": "text", + "text": "Figure S.8 illustrates an example of providing a prompt to TxGemma-27B-Predict that is not in the processed data format. TxGemma-27B-Predict is able to provide a coherent response in a manner similar to the general LLMs. Figure S.9 illustrates an example of first providing a prompt to TxGemma-27B-Predict in the processed format and asking follow-up questions in subsequent turns. In the second turn, instructing the model to not in the processed data format is able to elicit a reasonable but succinct response. However, the third turn leads to the model answering in the processed data format, highlighting the difficulty of multi-turn dialogue after training only on the processed TDC data. Figure S.7 plots the performance of TxGemma-27B-Chat on the MMLU benchmark in comparison with both Gemma-2-27B and TxGemma-27B-Predict. TxGemma-27B-Chat performs similarly to Gemma-2-27B on MMLU while TxGemma-27B-Predict scores much lower. Figure S.10 shows an example of using a specific prompting structure with TxGemma-27B-Chat to elicit reasoning on a more challenging task of clinical trial approval. 
If this prompting structure is not used, the model refuses to provide reasoning.", + "bbox": [ + 109, + 446, + 888, + 630 + ], + "page_idx": 38 + }, + { + "type": "text", + "text": "D.3 Agentic-Tx Tool Use Analysis", + "text_level": 1, + "bbox": [ + 109, + 642, + 419, + 657 + ], + "page_idx": 38 + }, + { + "type": "text", + "text": "Figure S.14 shows the tool usage frequency for different benchmarks, illustrating that Agentic-Tx dynamically adjusts its tool usage to suit the problem. Figure S.15 shows the most frequent tools used per question for chemical preference questions, showing consistent usage of molecule-based tools.", + "bbox": [ + 109, + 667, + 885, + 714 + ], + "page_idx": 38 + }, + { + "type": "text", + "text": "D.4 Proof-of-concept use of TxGemma for end-to-end therapeutic development", + "text_level": 1, + "bbox": [ + 109, + 726, + 821, + 743 + ], + "page_idx": 38 + }, + { + "type": "text", + "text": "In Figure S.3, we illustrate a simplified example of how TxGemma might be helpful in identifying a drug for ovarian cancer. In this example, we chose to directly prompt TxGemma, rather than using Agentic-Tx, to strictly isolate potential information leakage introduced by web search, which is outside of our training data. This approach allows us to examine the model's inherent capabilities, though we acknowledge that a full agent-based workflow is a plausible extension.", + "bbox": [ + 109, + 752, + 885, + 829 + ], + "page_idx": 38 + }, + { + "type": "text", + "text": "We initially use the DisGeNET prompt to identify an ovarian cancer-associated target gene from a short list of genes including PIK3CA, JAK2, RET. TxGemma-27B-Predict predicts that PIK3CA, a gene not found in the training set which is known to be mutated in ovarian cancer [2], has an association score of 0.7 with ovarian cancer. This association score is nearly 2.5 standard deviations above the mean score ( $\\mu = 0.37$ , $\\sigma = 0.13$ ), indicating a strong association. 
JAK2 and RET share an association score of 0.3 which is below", + "bbox": [ + 109, + 834, + 885, + 910 + ], + "page_idx": 38 + }, + { + "type": "page_number", + "text": "39", + "bbox": [ + 862, + 936, + 883, + 949 + ], + "page_idx": 38 + }, + { + "type": "text", + "text": "the mean score. We then used TxGemma-27B-Predict to select a potential therapeutic from a molecule shortlist, prioritizing predicted $\\mathrm{IC}_{50}$ against the E545K mutant (an oncogenic mutation [3]), toxicity, and clinical trial success. Our manually curated shortlist of drugs, unseen to the model during training, include two existing cancer therapies including alpelisib and afatinib and a novel molecule which we randomly generated. Both afatinib $(1.02\\mu \\mathrm{M}\\mathrm{IC}_{50})$ and the novel molecule $(10.2\\mu \\mathrm{M}\\mathrm{IC}_{50})$ exhibit high predicted $\\mathrm{IC}_{50}$ values, suggesting weak inhibition. However, alpelisib has a predicted $\\mathrm{IC}_{50}$ of $30~\\mathrm{nM}$ , suggestive of potent inhibition and relatively close to the experimental value of $5\\mathrm{nM}$ suggested by Chen et al. [4] and Fritsch et al. [5]. TxGemma-27B-Predict also predicts that alpelisib is not mutagenic and would pass a phase 1 clinical trial for ovarian cancer. This iterative evaluation also corroborated by existing evidence: alpelisib is approved for breast cancer [6] and has shown activity in ovarian cancer [7, 8, 9].", + "bbox": [ + 109, + 89, + 887, + 242 + ], + "page_idx": 39 + }, + { + "type": "text", + "text": "This workflow demonstrates a proof-of-concept for TxGemma's application in automating and optimizing therapeutic selection. We anticipate an agentic system capable of generating comprehensive lists of potential therapies and gene-disease associations paired with TxGemma would enable rapid prioritization and filtering, helping in reducing the candidate pool and accelerating the transition to preclinical studies. 
However, it's crucial to acknowledge the limitations of this demonstration. Clinical trial predictions are limited to Phase 1 success, and mutagenicity predictions do not encompass all aspects of small molecule toxicity. Future work should include experimental validation of TxGemma predictions and consideration of additional toxicity factors, such as hematologic toxicity, which were not included in our data.", + "bbox": [ + 109, + 247, + 883, + 369 + ], + "page_idx": 39 + }, + { + "type": "text", + "text": "TxGemma: Gene-Disease Association", + "text_level": 1, + "bbox": [ + 127, + 398, + 315, + 424 + ], + "page_idx": 39 + }, + { + "type": "text", + "text": "Q: Predict association from O to 1 between the following gene and malignant neoplasm of ovary.", + "bbox": [ + 127, + 431, + 318, + 468 + ], + "page_idx": 39 + }, + { + "type": "text", + "text": "PIK3CA:MPPRPSSGELW", + "bbox": [ + 129, + 472, + 243, + 481 + ], + "page_idx": 39 + }, + { + "type": "text", + "text": "A: PI3KCA has an association score of 0.7. 0.7 is 2.5a above the mean score", + "bbox": [ + 129, + 489, + 297, + 517 + ], + "page_idx": 39 + }, + { + "type": "text", + "text": "TxGemma: Drug-Target Interaction", + "text_level": 1, + "bbox": [ + 129, + 542, + 297, + 569 + ], + "page_idx": 39 + }, + { + "type": "text", + "text": "Q: Given the following gene, predict how effectively the drug will inhibit it.", + "bbox": [ + 129, + 575, + 339, + 602 + ], + "page_idx": 39 + }, + { + "type": "text", + "text": "PI3KCA E545K: MPPRSPSGELW... \nAlpelisib: C1-Cc(SC(-N)NC...)", + "bbox": [ + 129, + 609, + 277, + 627 + ], + "page_idx": 39 + }, + { + "type": "text", + "text": "A: Alpelisib has a IC5O of 30 nM.", + "bbox": [ + 130, + 636, + 259, + 652 + ], + "page_idx": 39 + }, + { + "type": "image", + "img_path": "images/229596d5615d8db9e940ffb05b38f2d2da4d2620458f30f93387cd1de47e9957.jpg", + "image_caption": [ + "Figure S.3 | Proof-of-concept example of applying TxGemma to end-to-end therapeutic development. 
TxGemma is used to suggest a therapeutic for ovarian cancer by first identifying PIK3CA as an associated gene target from a list of possible genes. Then, from a list of candidate therapeutics, TxGemma predicts that alpelisib (a molecule previously unseen to TxGemma that has shown activity against ovarian cancer and is approved for breast cancer) would bind the E545K mutant of PIK3CA, that it would not be toxic/mutagenic, and that it would be approved in a clinical trial. Note that this example serves as a proof-of-concept demonstration and does not account for all aspects of efficacy, toxicity, or trial approval. Rigorous experimental validation of TxGemma predictions to completely new therapeutics is also a critical step to evaluating TxGemma and remains an area of future work." + ], + "image_footnote": [], + "bbox": [ + 351, + 417, + 669, + 599 + ], + "page_idx": 39 + }, + { + "type": "text", + "text": "TxGemma: Clinical Trial Approval", + "text_level": 1, + "bbox": [ + 678, + 400, + 831, + 426 + ], + "page_idx": 39 + }, + { + "type": "text", + "text": "Q: Predict whether the following drug will pass a phase I clinical trial against malignant neoplasm of ovary.", + "bbox": [ + 678, + 431, + 872, + 468 + ], + "page_idx": 39 + }, + { + "type": "text", + "text": "Alpelisib:CC1=C(SC(=N1)NC...", + "text_level": 1, + "bbox": [ + 678, + 476, + 810, + 484 + ], + "page_idx": 39 + }, + { + "type": "text", + "text": "A: Approved.", + "bbox": [ + 678, + 494, + 723, + 510 + ], + "page_idx": 39 + }, + { + "type": "text", + "text": "TxGemma: Toxicity Prediction", + "text_level": 1, + "bbox": [ + 679, + 542, + 810, + 569 + ], + "page_idx": 39 + }, + { + "type": "text", + "text": "Q: Predict whether the following drug is mutagenic.", + "bbox": [ + 679, + 577, + 839, + 603 + ], + "page_idx": 39 + }, + { + "type": "text", + "text": "Alpelisib:CC1=C(SC(=N1)NC...", + "text_level": 1, + "bbox": [ + 679, + 609, + 813, + 619 + ], + "page_idx": 39 + }, + { + "type": "text", + "text": "A: 
Not mutagenic.", + "bbox": [ + 679, + 627, + 746, + 645 + ], + "page_idx": 39 + }, + { + "type": "page_number", + "text": "40", + "bbox": [ + 862, + 936, + 883, + 948 + ], + "page_idx": 39 + }, + { + "type": "image", + "img_path": "images/42c41199fcf8e1c1b61be220eadee67478dcac28dec9f92bb963ec83de23b969.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 192, + 88, + 777, + 310 + ], + "page_idx": 40 + }, + { + "type": "image", + "img_path": "images/0fc6f2350e51b7e6cd5a7bbc1317a452740c03d27b918c2dcd047504fcfadd35.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 192, + 321, + 776, + 566 + ], + "page_idx": 40 + }, + { + "type": "image", + "img_path": "images/d6c50205b6b2c913f7c73865098c30a77a468c56c5c6ec1feffbffeeab77100e.jpg", + "image_caption": [ + "Multi-instance tasks", + "Single-instance and generative tasks", + "Figure S.4 | Performance of TxGemma-27B-Predict compared to generalist and specialist SOTA models (top) The median relative change in performance of TxGemma-27B-Predict compared to Tx-LLM M. (middle) The median relative change in performance of TxGemma-27B-Predict compared to specialist SOTA models. (bottom) The median relative change in performance of TxGemma-27B-Predict compared to all SOTA models, including both Tx-LLM M and specialist models. Multi-instance tasks indicate tasks that involve multiple features, whereas single-instance tasks only involve one feature. The tasks within each task type are defined in Tables S.2 and S.3." + ], + "image_footnote": [], + "bbox": [ + 192, + 574, + 776, + 782 + ], + "page_idx": 40 + }, + { + "type": "page_number", + "text": "41", + "bbox": [ + 862, + 936, + 883, + 948 + ], + "page_idx": 40 + }, + { + "type": "image", + "img_path": "images/d64008d7bba648430f646cee132bb06a7242ca81f3335858adf4a949d8bac876.jpg", + "image_caption": [ + "Figure S.5 | TxGemma performs comparably to LlaSMol on small molecule tasks. 
Accuracy is reported for binary classification tasks, and RMSE is reported for regression tasks. BBBP corresponds to BBB Martins in TDC tasks, ESOL corresponds to Solubility AqSolDB, and Lipo corresponds to Lipophilicity AstraZeneca." + ], + "image_footnote": [], + "bbox": [ + 308, + 166, + 444, + 316 + ], + "page_idx": 41 + }, + { + "type": "image", + "img_path": "images/9d4476e7572c6c23be88f9ba85d88ce520cdc474eb566aa40abb2c1e2e6cb7ea.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 452, + 167, + 679, + 313 + ], + "page_idx": 41 + }, + { + "type": "image", + "img_path": "images/e6c13c82f9aad77ffe02614d6eac77afddf8ff42b5e2ab8a220b12c5e64a976b.jpg", + "image_caption": [ + "Figure S.6 | TxGemma performs comparably to MolE on small molecule tasks. Comparison of MolE with TxGemma-27B-Predict on TDC tasks, separated by metric type (MAE, AUROC, Spearman correlation, and AUPRC). TxGemma-27B-Predict performs better than MolE on 10 out of 22 tasks." + ], + "image_footnote": [], + "bbox": [ + 117, + 551, + 313, + 746 + ], + "page_idx": 41 + }, + { + "type": "image", + "img_path": "images/3485a458e0e83c0575b63d1e81dbaa9c3d70d585063afc9a1cb9ef52ff9240c6.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 305, + 551, + 496, + 728 + ], + "page_idx": 41 + }, + { + "type": "image", + "img_path": "images/09534e3b6d118249db6e3edff3bfb85233449f57161dcacc055ff510acfaca7c.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 496, + 551, + 687, + 746 + ], + "page_idx": 41 + }, + { + "type": "image", + "img_path": "images/c48b857842aeac69278aa4332949b757fe4bc93f85a7901073f2eedfb083c3c4.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 684, + 551, + 879, + 729 + ], + "page_idx": 41 + }, + { + "type": "page_number", + "text": "42", + "bbox": [ + 862, + 936, + 883, + 948 + ], + "page_idx": 41 + }, + { + "type": "table", + "img_path": "images/098d8717f5dbde8ee0685821cf521f28993021a44eb08614ac24af4103f4c735.jpg", + 
"table_caption": [ + "Table S.13 | Model performance on binary classification tasks. TxGemma-Predict and Gemma-2 performances compared with specialist SOTA for each binary classification task, along with the metric type." + ], + "table_footnote": [], + "table_body": "
Task NameMetricSpecialist SOTAGemma-2-2BGemma-2-9BGemma-2-27BTxGemma-2B-PredictTxGemma-9B-PredictTxGemma-27B-Predict
AMESAUROC0.871 [10]0.4870.6050.5080.7960.7980.816
BBB MartinsAUROC0.915 [11]0.2500.6450.5460.8640.8740.907
Bioavailability MaAUROC0.748 [12]0.4790.5840.5790.7150.6550.696
CYP1A2 VeithAUPRC0.900 [13]0.3880.5330.5620.9100.9160.922
CYP2C19 VeithAUROC0.890 [13]0.4560.5950.6190.9050.9060.899
CYP2C9 Substrate CarbonMangelsAUPRC0.441 [10]0.2930.3360.3670.4570.4680.427
CYP2C9 VeithAUPRC0.839 [14]0.2830.3740.4170.8010.7990.798
CYP2D6 Substrate CarbonMangelsAUPRC0.736 [14]0.2330.3290.3860.6050.6030.706
CYP2D6 VeithAUPRC0.739 [14]0.1450.1660.1850.6370.6640.681
CYP3A4 Substrate CarbonMangelsAUROC0.662 [15]0.5140.5850.5960.6690.6220.690
CYP3A4 VeithAUPRC0.904 [14]0.4270.5310.5350.8440.8390.854
Carcinogens LaguninAccuracy0.770 [16]0.2500.2860.3390.8210.8390.857
ClinToxAUROC0.948 [17]0.4370.4820.4240.8100.8310.888
DILIAUROC0.925 [10]0.3200.6510.6270.8750.8480.887
HIA HouAUROC0.988 [18]0.2570.9320.7830.9370.9670.988
HIVAUROC0.851 [19]0.4910.4950.5370.7370.7340.764
HuRIAUPRC0.724 [20]0.4960.4840.5260.7510.7790.799
MHC1 IEDB IMGT NielsenAUROC0.986 [21]0.4980.5040.5170.9100.9270.929
MHC2 IEDB JensenAUROC0.940 [22]0.4980.5260.5440.8120.8500.851
PAMPA NCATSAUROC0.900 [23]0.4650.5830.5440.6420.6710.705
Pgp BroccatelliAUROC0.935 [10]0.4160.6700.4970.9000.9110.936
SARSCOV2 3CLPro DiamondAUROC0.800 [24]0.3010.3880.4770.7330.7080.769
SARSCoV2 Vitro TouretAUROC0.640 [25]0.5680.6110.4790.6500.6680.598
SAbDab ChenAUPRC0.510 [26]0.5320.6960.7010.6760.8070.767
Skin ReactionAUROC0.840 [27]0.4290.5460.4930.6710.6480.708
Tox21AUROC0.961 [28]0.3580.4360.4970.8810.8960.893
ToxCastAUROC0.777 [17]0.4850.5120.5580.7840.7670.800
butkiewiczAUROC0.840 [29]0.4570.4910.4910.7910.7720.831
hERGAUROC0.874 [12]0.5380.6390.5000.8760.8810.884
hERG KarimAccuracy0.770 [30]0.5290.5320.5220.7780.7940.774
herg centralAUROC0.860 [31]0.4810.5110.5170.8800.8610.896
miRTarBaseAccuracy0.804 [32]0.4980.5010.4980.8050.8290.801
phase1AUROC0.576 [33]0.5620.5620.5530.6420.6350.622
phase2AUROC0.645 [33]0.5430.5710.5310.6650.6680.676
phase3AUROC0.723 [33]0.5590.5670.5590.7310.7290.739
weberAUROC0.870 [34]0.4660.5860.4690.7300.7270.749
", + "bbox": [ + 60, + 236, + 936, + 806 + ], + "page_idx": 42 + }, + { + "type": "page_number", + "text": "43", + "bbox": [ + 862, + 936, + 883, + 948 + ], + "page_idx": 42 + }, + { + "type": "table", + "img_path": "images/f86eb6e369466b452142128bdecbe6aacee09469cd8f80397c5d297b32679576.jpg", + "table_caption": [ + "Table S.14 | Model performance on regression and generation tasks. TxGemma-Predict and Gemma-2 performances compared with specialist SOTA for each regression and generation task, along with the metric type. Tasks for which we did not find a specialist SOTA value are indicated with N/A." + ], + "table_footnote": [], + "table_body": "
Task NameMetricSpecialist SOTAGemma-2-2BGemma-2-9BGemma-2-27BTxGemma-2B-PredictTxGemma-9B-PredictTxGemma-27B-Predict
BindingDB PatentPCC0.588 [35]-0.066-0.0390.0300.4220.5240.538
BindingDB ic50Spearman0.637 [36]0.0010.0020.0440.3990.3980.445
BindingDB kdPCC0.712 [37]0.197-0.0090.1190.3520.3700.456
BindingDB kiPCC0.840 [38]-0.018-0.053-0.0270.6610.7370.676
Buchwald HartwigPCC0.786 [39]0.5280.6360.6840.8610.9150.910
Caco2 WangMAE0.285 [18]1.0570.5330.6180.4760.3730.401
Clearance Hepatocyte AZSpearman0.440 [40]0.1410.1630.2140.3530.3380.259
Clearance Microsome AZSpearman0.625 [18]0.2390.3250.2940.4680.6230.462
DAVISMSE0.219 [41]2.7059.0544.4730.6010.5870.555
DisGeNETMAEN/A0.2940.2950.2770.0570.0540.054
DrugComb BlissMAE4.560 [42]8.2137.4136.4564.2304.3374.156
DrugComb CSSMAE16.858 [42]36.84733.83722.61415.75216.48015.000
DrugComb HSAMAE4.453 [42]7.4587.3656.6704.2314.3354.209
DrugComb LoeweMAE9.184 [42]13.87313.36914.73117.34218.66517.336
DrugComb ZIPMAE4.027 [42]8.5886.2265.4043.9503.9043.807
GDSC1PCC0.860 [43]-0.0410.0730.0930.8760.5450.892
GDSC2PCC0.860 [43]-0.043-0.0370.0860.8240.5390.912
Half Life ObachSpearman0.547 [44]0.2880.2840.4850.3860.4940.458
KIBAMSE0.154 [41]2.8871.9252.0160.5880.5480.633
LD50 ZhuMAE0.552 [18]1.9710.8960.8740.7100.6300.628
LeenaySpearman0.740 [45]0.0850.0910.1460.0970.0670.276
Lipophilicity AstraZenecaMAE0.467 [46]1.5061.2071.0320.6100.5650.539
OncoPolyPharmacologyPCC0.730 [47]-0.0400.0640.0720.4730.5180.540
PPBR AZMAE7.788 [46]10.8369.7689.8799.2668.8899.029
Protein SAbDabMAEN/A1.2801.1701.1631.0661.1061.210
Solubility AqSolDBMAE0.761 [46]4.2142.5493.0960.9610.8680.821
TAPMAEN/A5.0084.2413.9585.3014.4734.280
USPTOAccuracy0.415 [48]0.0000.0010.0000.2870.0970.084
USPTO YieldsPCC0.361 [39]-0.0150.0260.0640.0110.0310.395
VDss LombardoSpearman0.627 [49]0.1000.4130.3540.5640.6070.560
", + "bbox": [ + 91, + 290, + 906, + 768 + ], + "page_idx": 43 + }, + { + "type": "page_number", + "text": "44", + "bbox": [ + 862, + 936, + 883, + 949 + ], + "page_idx": 43 + }, + { + "type": "table", + "img_path": "images/9c83b0f019f49296d6ba47ff826893c3814c3507cfe6ad3bd8589e6ca7128176.jpg", + "table_caption": [ + "Table S.15 | Model performance on binary classification tasks. TxGemma-Predict, TxGemma-Chat, and Tx-LLM performances for each binary classification task, along with the metric type." + ], + "table_footnote": [ + "* To predict whether compounds have Anti-HIV properties." + ], + "table_body": "
Task NameMetricTxGemma-9B-PredictTxGemma-27B-PredictTxGemma-9B-ChatTxGemma-27B-ChatTx-LLM STx-LLM M
AMESAUROC0.7980.8160.7210.7330.7850.786
BBB MartinsAUROC0.8740.9070.8110.8610.8050.882
Bioavailability MaAUROC0.6550.6960.6200.6590.6050.702
CYP1A2 VeithAUPRC0.9160.9220.8390.8230.9060.914
CYP2C19 VeithAUROC0.9060.8990.8370.8280.8770.895
CYP2C9 Substrate CarbonMangelsAUPRC0.4680.4270.3820.4270.4030.436
CYP2C9 VeithAUPRC0.7990.7980.6670.6820.7500.788
CYP2D6 Substrate CarbonMangelsAUPRC0.6030.7060.5490.7000.6430.600
CYP2D6 VeithAUPRC0.6640.6810.5040.4350.6050.659
CYP3A4 Substrate CarbonMangelsAUROC0.6220.6900.6420.6660.6370.647
CYP3A4 VeithAUPRC0.8390.8540.7490.7500.8000.840
Carcinogens LaguninAccuracy0.8390.8570.8930.9110.8570.786
ClinToxAUROC0.8310.8880.7110.6370.8180.863
DILIAUROC0.8480.8870.6880.7660.7270.882
HIA HouAUROC0.9670.9880.8720.8970.9420.990
HIV*AUROC0.7340.7640.6120.5820.6860.732
HuRIAUPRC0.7790.7990.6280.6210.7050.753
MHC1 IEDB IMGT NielsenAUROC0.9270.9290.8750.8250.9130.907
MHC2 IEDB JensenAUROC0.8500.8510.7240.6830.7810.863
PAMPA NCATSAUROC0.6710.7050.7350.6640.6460.668
Pgp BroccatelliAUROC0.9110.9360.8990.9120.9090.939
SARSCOV2 3CLPro DiamondAUROC0.7080.7690.6990.7220.7550.712
SARSCoV2 Vitro TouretAUROC0.6680.5980.5030.5060.5120.601
SAbDab ChenAUPRC0.8070.7670.7020.7190.3900.473
Skin ReactionAUROC0.6480.7080.6380.5430.5640.615
Tox21AUROC0.8960.8930.8070.7970.8580.882
ToxCastAUROC0.7670.8000.7540.7340.7790.792
butkiewiczAUROC0.7720.8310.6290.6190.5740.566
hERGAUROC0.8810.8840.8300.8320.8790.909
hERG KarimAccuracy0.7940.7740.6570.6680.7240.745
herg centralAUROC0.8610.8960.8300.8070.8800.888
miRTarBaseAccuracy0.8290.8010.6790.6440.7650.799
phase1AUROC0.6350.6220.5760.5570.6240.667
phase2AUROC0.6680.6760.6380.6260.6390.676
phase3AUROC0.7290.7390.6830.6680.7010.728
weberAUROC0.7270.7490.6720.6430.7380.743
", + "bbox": [ + 71, + 233, + 926, + 795 + ], + "page_idx": 44 + }, + { + "type": "page_number", + "text": "45", + "bbox": [ + 862, + 936, + 883, + 948 + ], + "page_idx": 44 + }, + { + "type": "table", + "img_path": "images/1b5ffa1d3820b6dfedd7efb2c60c35cd6a2033c4153cbd94273ca4ae0964a8d1.jpg", + "table_caption": [ + "Table S.16 | Model performance on regression and generation tasks. TxGemma-Predict, TxGemma-Chat, and Tx-LLM performances for each regression and generation task, along with the metric type." + ], + "table_footnote": [], + "table_body": "
Task NameMetricTxGemma-9B-PredictTxGemma-27B-PredictTxGemma-9B-ChatTxGemma-27B-ChatTx-LLM STx-LLM M
BindingDB PatentPCC0.5240.5380.4520.2200.4740.531
BindingDB ic50Spearman0.3980.4450.4120.3620.3260.311
BindingDB kdPCC0.3700.4560.1620.1590.3170.391
BindingDB kiPCC0.7370.6760.4480.2110.5650.726
Buchwald HartwigPCC0.9150.9100.2550.7570.6820.905
Caco2 WangMAE0.3730.4010.6430.3980.6210.432
Clearance Hepatocyte AZSpearman0.3380.2590.1970.1500.2560.385
Clearance Microsome AZSpearman0.6230.4620.3450.4200.3850.413
DAVISMSE0.5870.5550.6080.5610.5640.704
DisGeNETMAE0.0540.0540.0660.0640.0590.057
DrugComb BlissMAE4.3374.1564.5024.5114.4254.104
DrugComb CSSMAE16.48015.00016.38416.90014.74014.057
DrugComb HSAMAE4.3354.2094.4974.5204.3114.118
DrugComb LoeweMAE18.66517.33616.99416.91417.42817.381
DrugComb ZIPMAE3.9043.8074.1394.1414.0473.777
GDSC1PCC0.5450.8920.8610.8020.8760.887
GDSC2PCC0.5390.9120.8640.8230.8960.900
Half Life ObachSpearman0.4940.4580.3300.4140.5250.448
KIBAMSE0.5480.6330.7050.8520.7090.548
LD50 ZhuMAE0.6300.6280.7400.7050.8080.618
LeenaySpearman0.0670.2760.1280.0950.0480.083
Lipophilicity AstraZenecaMAE0.5650.5390.9850.8420.7790.587
OncoPolyPharmacologyPCC0.5180.5400.3590.1930.4180.552
PPBR AZMAE8.8899.02911.36710.89511.1389.108
Protein SAbDabMAE1.1061.2101.2681.1161.4321.268
Solubility AqSolDBMAE0.8680.8211.1591.1330.9310.987
TAPMAE4.4734.2804.8594.0835.0754.983
USPTOAccuracy0.0970.0840.0860.0910.2200.239
USPTO YieldsPCC0.0310.3950.0030.0260.0420.070
VDss LombardoSpearman0.6070.5600.3960.4070.4970.609
", + "bbox": [ + 102, + 282, + 895, + 760 + ], + "page_idx": 45 + }, + { + "type": "page_number", + "text": "46", + "bbox": [ + 862, + 936, + 883, + 948 + ], + "page_idx": 45 + }, + { + "type": "table", + "img_path": "images/011981604316ec859899c67e2c3b9723288769a28b7e640db6e16cd5b20b778e.jpg", + "table_caption": [ + "Table S.17 | Model performance on binary classification tasks for models trained only on datasets with commercial licenses. TxGemma-Predict and TxGemma-Chat performances for each binary classification task, along with the metric type." + ], + "table_footnote": [ + "* To predict whether compounds have Anti-HIV properties." + ], + "table_body": "
Task NameMetricTxGemma-2B-PredictTxGemma-9B-PredictTxGemma-27B-PredictTxGemma-9B-ChatTxGemma-27B-Chat
AMESAUROC0.8120.8030.8260.7230.729
BBB MartinsAUROC0.8830.8490.8990.8320.848
Bioavailability MaAUROC0.6880.6880.7240.6660.625
CYP1A2 VeithAUPRC0.9110.9140.9160.8620.817
CYP2C19 VeithAUROC0.9050.8970.8970.8440.823
CYP2C9 Substrate CarbonMangelsAUPRC0.4170.3900.4600.4140.375
CYP2C9 VeithAUPRC0.7870.8000.7930.7000.685
CYP2D6 Substrate CarbonMangelsAUPRC0.6260.6970.7060.6530.704
CYP2D6 VeithAUPRC0.6660.6620.6770.5170.422
CYP3A4 Substrate CarbonMangelsAUROC0.6380.6800.6920.6440.653
CYP3A4 VeithAUPRC0.8420.8390.8520.7600.747
Carcinogens LaguninAccuracy0.9110.8570.8750.8930.929
ClinToxAUROC0.9170.8150.8840.7160.595
DILIAUROC0.8290.8230.9270.6750.797
HIA HouAUROC0.9840.9540.9900.9060.927
HIVAUROC0.7810.7300.7680.6410.589
HuRIAUPRC0.7350.7670.7970.6850.620
MHC1 IEDB IMGT NielsenAUROC0.9300.9290.9330.8870.826
MHC2 IEDB JensenAUROC0.8550.8520.8550.7330.682
PAMPA NCATSAUROC0.6940.6300.7240.6840.659
Pgp BroccatelliAUROC0.9220.9320.9410.8730.920
SARSCOV2 3CLPro DiamondAUROC0.7480.7990.6760.7160.712
SARSCoV2 Vitro TouretAUROC0.6590.6220.5970.5270.516
SAbDab ChenAUPRC0.7260.7450.7930.5230.731
Skin ReactionAUROC0.6910.6240.7330.6210.571
Tox21AUROC0.8970.8930.8900.8180.797
ToxCastAUROC0.7870.7660.7970.7540.735
butkiewiczAUROC0.8110.7750.8260.6810.606
hERGAUROC0.9020.8900.8940.8550.829
hERG KarimAccuracy0.7780.7960.7720.6490.673
herg centralAUROC0.8900.8600.8920.8420.805
miRTarBaseAccuracy0.8180.8340.8020.6720.649
weberAUROC0.7500.6970.7490.6920.645
", + "bbox": [ + 112, + 261, + 883, + 781 + ], + "page_idx": 46 + }, + { + "type": "page_number", + "text": "47", + "bbox": [ + 862, + 936, + 883, + 948 + ], + "page_idx": 46 + }, + { + "type": "table", + "img_path": "images/2f9c4d08248fb222ff8b63a338289fdf3f2fba41aaaec627497361f9dcd3fb03.jpg", + "table_caption": [ + "Table S.18 | Model performance on regression and generation tasks for models trained only on datasets with commercial licenses. TxGemma-Predict and TxGemma-Chat performances for each regression or generation task, along with the metric type." + ], + "table_footnote": [], + "table_body": "
Task NameMetricTxGemma-2B-PredictTxGemma-9B-PredictTxGemma-27B-PredictTxGemma-9B-ChatTxGemma-27B-Chat
BindingDB PatentPCC0.5560.3760.5370.4380.118
BindingDB ic50Spearman0.4250.3130.4650.4430.361
BindingDB kdPCC0.4900.3930.2890.2070.156
BindingDB kiPCC0.7280.7120.6700.3870.218
Buchwald HartwigPCC0.9200.9180.9030.5740.818
Caco2 WangMAE0.6190.4910.4790.5880.383
Clearance Hepatocyte AZSpearman0.2920.3780.3500.1660.190
Clearance Microsome AZSpearman0.5210.5240.5100.3940.395
DAVISMSE0.5760.5640.5750.5610.561
DrugComb BlissMAE4.0884.2864.1574.4544.519
DrugComb CSSMAE14.56815.37014.92515.96016.649
DrugComb HSAMAE4.0634.2824.1784.4864.529
DrugComb LoeweMAE17.31317.86217.32717.19016.873
DrugComb ZIPMAE3.7373.8483.8234.0934.132
Half Life ObachSpearman0.4230.3480.4910.2690.393
KIBAMSE0.5620.5250.5540.8300.858
LD50 ZhuMAE0.6980.7180.6770.7240.721
LeenaySpearman0.1140.0890.2590.0780.183
Lipophilicity AstraZenecaMAE0.5710.6670.6130.8340.837
OncoPolyPharmacologyPCC0.5560.4370.5310.3880.148
PPBR AZMAE8.8139.1778.79211.00411.025
Protein SAbDabMAE1.1171.0221.0721.3481.173
Solubility AqSolDBMAE0.9111.1850.8021.1601.135
TAPMAE5.4984.8394.0884.6114.444
USPTOAccuracy0.3160.0410.2810.1450.090
USPTO YieldsPCC0.4710.0020.3500.1140.002
VDss LombardoSpearman0.5940.5380.5910.4100.487
", + "bbox": [ + 140, + 311, + 857, + 746 + ], + "page_idx": 47 + }, + { + "type": "page_number", + "text": "48", + "bbox": [ + 862, + 936, + 883, + 948 + ], + "page_idx": 47 + }, + { + "type": "image", + "img_path": "images/4060b21bb67dbad81e197effeba434dd94828fd7e278fae975f4ae41d84efdb2.jpg", + "image_caption": [ + "Figure S.7 | Performance of TxGemma and Gemma-2 models on MMLU. Score of TxGemma-27B-Chat, TxGemma-27B-Predict, and Gemma-2-27B on each subject of the MMLU benchmark [50], a comprehensive suite of 57 diverse tasks spanning mathematics, history, computer science, law, etc. This benchmark evaluates knowledge, reasoning, and problem-solving abilities across a wide range of academic subjects, providing a robust measure of overall language understanding. TxGemma-27B-Chat (73.87%) performs slightly below Gemma-2-27B (75.38%) but significantly better than TxGemma-27B-Predict (53.60%), suggesting that domain fine-tuning purely on therapeutic data can cause loss of information learned during base model training, but simultaneously training with general instruction-tuning data can compensate for this." + ], + "image_footnote": [], + "bbox": [ + 200, + 90, + 790, + 785 + ], + "page_idx": 48 + }, + { + "type": "page_number", + "text": "49", + "bbox": [ + 862, + 936, + 883, + 948 + ], + "page_idx": 48 + }, + { + "type": "text", + "text": "Tell me about the blood-brain barrier", + "bbox": [ + 161, + 292, + 348, + 303 + ], + "page_idx": 49 + }, + { + "type": "text", + "text": "TxGemma", + "text_level": 1, + "bbox": [ + 279, + 332, + 336, + 343 + ], + "page_idx": 49 + }, + { + "type": "text", + "text": "The blood-brain barrier (BBB) is a highly selective semipermeable border that separates the circulating blood from the brain and extracellular fluid in the central nervous system (CNS). 
It plays a crucial role in maintaining a stable environment for the brain, protecting it from harmful substances and pathogens while allowing the passage of essential nutrients.", + "bbox": [ + 279, + 354, + 825, + 396 + ], + "page_idx": 49 + }, + { + "type": "text", + "text": "Here are some key features of the BBB:", + "bbox": [ + 279, + 398, + 472, + 409 + ], + "page_idx": 49 + }, + { + "type": "text", + "text": "Cellular composition: The BBB is primarily formed by endothelial cells lining the capillaries in the brain. These cells are tightly connected by specialized tight junctions that restrict the movement of molecules between them.", + "bbox": [ + 279, + 412, + 825, + 441 + ], + "page_idx": 49 + }, + { + "type": "text", + "text": "Astrocytes and pericytes: These cells provide structural and functional support to the endothelial cells, contributing to the regulation of BBB permeability.", + "bbox": [ + 279, + 446, + 823, + 467 + ], + "page_idx": 49 + }, + { + "type": "text", + "text": "Selective transport mechanisms: The BBB utilizes various transport systems to regulate the passage of molecules across its barrier. 
These include:", + "bbox": [ + 279, + 470, + 823, + 489 + ], + "page_idx": 49 + }, + { + "type": "text", + "text": "Passive diffusion: Small lipophilic molecules can readily cross the BBB.", + "bbox": [ + 279, + 493, + 629, + 503 + ], + "page_idx": 49 + }, + { + "type": "text", + "text": "Facilitated diffusion: Carrier-mediated transport systems assist the movement of specific molecules, such as glucose and amino acids.", + "bbox": [ + 279, + 507, + 823, + 527 + ], + "page_idx": 49 + }, + { + "type": "text", + "text": "Active transport: Energy-dependent mechanisms pump molecules against their concentration gradient, such as the removal of toxins from the brain.", + "bbox": [ + 279, + 530, + 825, + 551 + ], + "page_idx": 49 + }, + { + "type": "text", + "text": "P-glycoprotein (P-gp): This efflux pump actively removes a wide range of drugs from the brain, including many central nervous system-acting medications.", + "bbox": [ + 279, + 554, + 825, + 575 + ], + "page_idx": 49 + }, + { + "type": "text", + "text": "*Impairment of the BBB can lead to various neurological disorders, including stroke, multiple sclerosis, and Alzheimer's disease. It can also allow the entry of harmful substances into the brain, such as pathogens and toxins.", + "bbox": [ + 279, + 578, + 825, + 607 + ], + "page_idx": 49 + }, + { + "type": "text", + "text": "*Researchers are actively developing strategies to overcome the BBB for drug delivery to the brain, including using nanocarriers and focused ultrasound.", + "bbox": [ + 279, + 611, + 823, + 632 + ], + "page_idx": 49 + }, + { + "type": "text", + "text": "Figure S.8 | Models can converse about general topics after fine-tuning only on the processed TDC data. Example of a dialogue with TxGemma-27B-Predict. 
When asked a question that is not in a processed TDC data format, the model is able to respond coherently.", + "bbox": [ + 109, + 662, + 887, + 707 + ], + "page_idx": 49 + }, + { + "type": "page_number", + "text": "50", + "bbox": [ + 862, + 936, + 883, + 948 + ], + "page_idx": 49 + }, + { + "type": "text", + "text": "Example of TxGemma-27B-Predict Failure in Conversation", + "text_level": 1, + "bbox": [ + 362, + 270, + 627, + 281 + ], + "page_idx": 50 + }, + { + "type": "text", + "text": "Prompt", + "text_level": 1, + "bbox": [ + 225, + 300, + 264, + 309 + ], + "page_idx": 50 + }, + { + "type": "text", + "text": "Instructions: Answer the following question about drug properties.", + "bbox": [ + 225, + 311, + 521, + 321 + ], + "page_idx": 50 + }, + { + "type": "text", + "text": "Context: As a membrane separating circulating blood and brain extracellular fluid, the blood-brain barrier (BBB) is the protection layer that blocks most foreign drugs. Thus the ability of a drug to penetrate the barrier to deliver to the site of action forms a crucial challenge in development of drugs for central nervous system.", + "bbox": [ + 225, + 325, + 750, + 352 + ], + "page_idx": 50 + }, + { + "type": "text", + "text": "Question: Given a drug SMILES string, predict whether it", + "bbox": [ + 227, + 354, + 475, + 364 + ], + "page_idx": 50 + }, + { + "type": "text", + "text": "(A) does not cross the BBB (B) crosses the BBB", + "bbox": [ + 227, + 367, + 431, + 376 + ], + "page_idx": 50 + }, + { + "type": "text", + "text": "Drug SMILES: C1CN(CCC1(C=O)NC(CCO)C2=CC=C(C=C2)Cl)N)C3=NC=NC4=C3C=CN4", + "bbox": [ + 227, + 378, + 617, + 387 + ], + "page_idx": 50 + }, + { + "type": "text", + "text": "Answer:", + "bbox": [ + 227, + 390, + 267, + 398 + ], + "page_idx": 50 + }, + { + "type": "text", + "text": "Tx-Gemma-Predict Response", + "text_level": 1, + "bbox": [ + 227, + 417, + 366, + 426 + ], + "page_idx": 50 + }, + { + "type": "text", + "text": "Answer: (B)", + "bbox": [ + 227, + 431, + 
281, + 440 + ], + "page_idx": 50 + }, + { + "type": "text", + "text": "Prompt", + "text_level": 1, + "bbox": [ + 227, + 458, + 264, + 467 + ], + "page_idx": 50 + }, + { + "type": "text", + "text": "What properties are useful for crossing the blood-brain barrier? Don't answer with (A) or (B)", + "bbox": [ + 227, + 472, + 625, + 481 + ], + "page_idx": 50 + }, + { + "type": "text", + "text": "Tx-Gemma-Predict Response", + "text_level": 1, + "bbox": [ + 227, + 498, + 366, + 507 + ], + "page_idx": 50 + }, + { + "type": "text", + "text": "Answer: Small molecular size, high lipophilicity, lack of ionization, and absence of extensive protein binding are generally favorable for BBB penetration.", + "bbox": [ + 227, + 511, + 746, + 531 + ], + "page_idx": 50 + }, + { + "type": "text", + "text": "Prompt", + "text_level": 1, + "bbox": [ + 227, + 547, + 264, + 556 + ], + "page_idx": 50 + }, + { + "type": "text", + "text": "Does the drug I gave you have these properties? Don't answer with (A) or (B)", + "bbox": [ + 227, + 560, + 558, + 569 + ], + "page_idx": 50 + }, + { + "type": "text", + "text": "Tx-Gemma-Predict Response", + "text_level": 1, + "bbox": [ + 227, + 585, + 366, + 595 + ], + "page_idx": 50 + }, + { + "type": "text", + "text": "Answer: (B)", + "bbox": [ + 227, + 599, + 281, + 609 + ], + "page_idx": 50 + }, + { + "type": "text", + "text": "Figure S.9 | Predictive model struggles to hold a conversation about processed TDC data prompts after fine-tuning only on this data. Example of a multi-turn dialogue with TxGemma-27B-Predict. The initial turn is a question in the processed TDC data format. After the initial turn, the model is prompted to describe some properties associated with the problem of crossing the blood-brain barrier. An additional instruction to avoid answering with (A) or (B) is necessary. 
The model is able to provide some answers in natural language, but another turn leads to an inappropriate multiple choice answer.", + "bbox": [ + 109, + 637, + 885, + 728 + ], + "page_idx": 50 + }, + { + "type": "page_number", + "text": "51", + "bbox": [ + 862, + 936, + 882, + 948 + ], + "page_idx": 50 + }, + { + "type": "text", + "text": "Provide five steps of reasoning to think through the problem and then provide an answer. Let's think step by step. Consider whether the drug would pass phase 1 trials, not whether the drug would be fully approved. You must also discuss the specific drug and disease as part of your reasoning that leads to your answer. This should be emphasized.", + "bbox": [ + 147, + 97, + 720, + 140 + ], + "page_idx": 51 + }, + { + "type": "text", + "text": "Instructions: Answer the following question about clinical trials.", + "bbox": [ + 147, + 145, + 480, + 157 + ], + "page_idx": 51 + }, + { + "type": "text", + "text": "Context: Clinical trial is the most time and cost-consuming step in the drug discovery process. Phase 1 clinical trials test the safety and basic properties of a new drug or treatment in a small group of people for the first time. Optimizing and designing trials with machine learning could drastically lead to the speedup of delivery of life-saving therapeutics to patients. Clinical trial outcome prediction is a machine learning task that aims to forecast the outcome of clinical trials, such as the approval rate of a drug or treatment. 
It utilizes various clinical trial features, including the drug's molecular structure and patient disease.", + "bbox": [ + 145, + 162, + 720, + 227 + ], + "page_idx": 51 + }, + { + "type": "text", + "text": "Question: Given a drug SMILES string and disease, predict if the phase 1 trial (A) would not be approved (B) would be approved", + "bbox": [ + 147, + 231, + 547, + 253 + ], + "page_idx": 51 + }, + { + "type": "text", + "text": "Drug SMILES: CC1=C(SC(=N1)NC(=O)N2CCCCC2(=O)N)C3=CC(=NC=C3)C(C)(C)(C(F)(F)F", + "bbox": [ + 147, + 260, + 604, + 272 + ], + "page_idx": 51 + }, + { + "type": "text", + "text": "Disease: Malignant neoplasm of ovary: Ovarian cancer is a disease that affects women. In this form of cancer, certain cells in the ovary become abnormal and multiply uncontrollably to form a tumor. The ovaries are the female reproductive organs in which egg cells are produced. In about 90 percent of cases, ...", + "bbox": [ + 145, + 277, + 720, + 310 + ], + "page_idx": 51 + }, + { + "type": "text", + "text": "Remember: (A) would not be approved (B) would be approved", + "bbox": [ + 147, + 315, + 477, + 327 + ], + "page_idx": 51 + }, + { + "type": "text", + "text": "Reasoning:", + "bbox": [ + 148, + 335, + 212, + 348 + ], + "page_idx": 51 + }, + { + "type": "text", + "text": "TxGemma", + "text_level": 1, + "bbox": [ + 264, + 372, + 330, + 385 + ], + "page_idx": 51 + }, + { + "type": "text", + "text": "Here's a step-by-step reasoning process to predict the outcome of the Phase 1 trial:", + "bbox": [ + 267, + 397, + 705, + 409 + ], + "page_idx": 51 + }, + { + "type": "text", + "text": "1. 
Analyze the Drug's Chemical Structure:", + "text_level": 1, + "bbox": [ + 267, + 426, + 496, + 438 + ], + "page_idx": 51 + }, + { + "type": "text", + "text": "Observation: The SMILES string indicates a complex molecule with multiple aromatic rings, fluorine atoms, and amide groups.", + "bbox": [ + 267, + 441, + 841, + 463 + ], + "page_idx": 51 + }, + { + "type": "text", + "text": "Reasoning: Such structures often point to potential biological activity, but also raise concerns about potential toxicity and side effects.", + "bbox": [ + 267, + 465, + 841, + 487 + ], + "page_idx": 51 + }, + { + "type": "text", + "text": "2. Consider the Disease:", + "text_level": 1, + "bbox": [ + 267, + 491, + 403, + 501 + ], + "page_idx": 51 + }, + { + "type": "text", + "text": "Observation: Ovarian cancer is a serious and often aggressive disease with limited treatment options.", + "bbox": [ + 267, + 505, + 797, + 517 + ], + "page_idx": 51 + }, + { + "type": "text", + "text": "Reasoning: The unmet medical need is high, meaning there's a greater chance for a new drug to be approved if it shows any promise of efficacy.", + "bbox": [ + 267, + 520, + 841, + 542 + ], + "page_idx": 51 + }, + { + "type": "text", + "text": "3. Evaluate Potential Toxicity:", + "text_level": 1, + "bbox": [ + 267, + 560, + 431, + 571 + ], + "page_idx": 51 + }, + { + "type": "text", + "text": "Observation: We don't have specific information about the drug's toxicity profile from the given data.", + "bbox": [ + 267, + 574, + 794, + 585 + ], + "page_idx": 51 + }, + { + "type": "text", + "text": "Reasoning: Phase 1 trials primarily focus on safety. The drug's chemical structure suggests potential for toxicity, which would need to be carefully assessed in the trial.", + "bbox": [ + 267, + 588, + 839, + 611 + ], + "page_idx": 51 + }, + { + "type": "text", + "text": "4. 
Assess the Likelihood of Demonstrating Safety:", + "text_level": 1, + "bbox": [ + 267, + 628, + 542, + 638 + ], + "page_idx": 51 + }, + { + "type": "text", + "text": "Observation: While the drug's structure raises concerns, there's no information suggesting inherent, severe toxicity that would preclude Phase 1 testing.", + "bbox": [ + 267, + 643, + 841, + 664 + ], + "page_idx": 51 + }, + { + "type": "text", + "text": "Reasoning: Most drugs with novel structures undergo preclinical testing before reaching Phase 1. If the drug passed those tests, it likely has a reasonable chance of demonstrating acceptable safety in a small group of patients.", + "bbox": [ + 267, + 667, + 841, + 700 + ], + "page_idx": 51 + }, + { + "type": "text", + "text": "5. Weighing the Factors:", + "text_level": 1, + "bbox": [ + 267, + 717, + 406, + 729 + ], + "page_idx": 51 + }, + { + "type": "text", + "text": "Observation: We have a potentially effective drug for a serious disease, but with potential toxicity concerns.", + "bbox": [ + 267, + 732, + 833, + 744 + ], + "page_idx": 51 + }, + { + "type": "text", + "text": "Reasoning: Phase 1 trials are designed to assess safety and dosage. Even if the drug has some side effects, it's likely to be approved to proceed to further testing if it shows any signs of efficacy and the benefits outweigh the risks in the context of a serious disease.", + "bbox": [ + 267, + 747, + 841, + 779 + ], + "page_idx": 51 + }, + { + "type": "text", + "text": "Answer: (B) would be approved", + "bbox": [ + 267, + 787, + 437, + 800 + ], + "page_idx": 51 + }, + { + "type": "text", + "text": "Figure S.10 | Enabling reasoning with TxGemma-Chat on challenging tasks. Example of prompting TxGemma-27B-Chat to explain its prediction for predicting clinical trial approval, a relatively unintuitive task. 
A prefix is added before the prompt to provide instructions for reasoning, and a reminder is added at the end so the model correctly specifies the option corresponding to its desired answer. Lastly, the \"Answer\" text is changed to \"Reasoning:\" to enable reasoning steps. The reasoning provided by the model is not comprehensive but can provide useful insights into the drug-disease interaction.", + "bbox": [ + 109, + 838, + 885, + 929 + ], + "page_idx": 51 + }, + { + "type": "page_number", + "text": "52", + "bbox": [ + 862, + 936, + 883, + 948 + ], + "page_idx": 51 + }, + { + "type": "image", + "img_path": "images/50aa41bd72408b2d1964da82f8d4a59c63c3870b25b0effe6eb0301bce2fba97.jpg", + "image_caption": [ + "Figure S.11 | Inference speed of TxGemma models. The number of examples inferred per day at different model sizes, normalized by the number of TPUv5e chips used for serving. The PPBR AZ task was used for the benchmarking due to its reasonable size." + ], + "image_footnote": [], + "bbox": [ + 269, + 106, + 720, + 281 + ], + "page_idx": 52 + }, + { + "type": "image", + "img_path": "images/b821a33a107593bfb04127fe8375bcedb99541d1716ad89c30f99bc5c74a7f45.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 313, + 398, + 681, + 625 + ], + "page_idx": 52 + }, + { + "type": "image", + "img_path": "images/595c78f89cc375092a21ef0c686aac0157bd0fd7a3be1241a02d3da171359946.jpg", + "image_caption": [ + "Figure S.12 | Contamination analysis. (top) Out of 66 tasks, $23\\%$ had some datapoints in the test set that were found in the Gemma-2 pretraining data, while $77\\%$ did not. For tasks that had some contaminated datapoints, we plot the percent of the test set that was contaminated. (bottom) Distributions of cosine similarities between SMILES string embeddings and molecular name embeddings. Decoy name embeddings indicate a random different molecule name." 
+ ], + "image_footnote": [], + "bbox": [ + 310, + 635, + 679, + 799 + ], + "page_idx": 52 + }, + { + "type": "page_number", + "text": "53", + "bbox": [ + 862, + 936, + 883, + 948 + ], + "page_idx": 52 + }, + { + "type": "image", + "img_path": "images/a06c6ddce36e0a8cc6fde75d0a6734d0239ad730b94f9224094944eb29afbd53.jpg", + "image_caption": [ + "Figure S.13 | Model performance after filtering contaminated datapoints. Performance of TxGemma-27B-Predict on both original unfiltered test sets and filtered test sets in which contaminated datapoints were removed. (left) For these tasks, higher values correspond to better models, and the metrics are defined in Tables S.13 and S.14. Error bars are bootstrapped standard errors. (right) For these tasks, lower values correspond to better models, and the metrics (either MAE or MSE) are defined in Tables S.13 and S.14. Error bars are bootstrapped standard errors." + ], + "image_footnote": [], + "bbox": [ + 174, + 137, + 553, + 359 + ], + "page_idx": 53 + }, + { + "type": "image", + "img_path": "images/5a77174bc677da9772e91115ac640fda919da96c5104c827eee2013682a15b8a.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 589, + 138, + 821, + 345 + ], + "page_idx": 53 + }, + { + "type": "image", + "img_path": "images/8831205666fac2be3162fb8e88173d029338f92c0c4193c627a45407203b69d9.jpg", + "image_caption": [ + "Figure S.14 | Breakdown of tool-usage frequency for Chemical Preference dataset and HLE dataset. Agentic-Tx adapts its tool usage to reason effectively about different tasks. For Chemical Preference, which requires evaluating drug candidates, the system correctly invokes tools for molecular characterization and safety assessment, such as SMILES description and toxicity prediction. For the Bio+Med task, focused on complex biomedical questions, the agent prioritizes PubMed and Wikipedia, demonstrating reliance on broad knowledge retrieval and synthesis." 
+ ], + "image_footnote": [], + "bbox": [ + 174, + 561, + 504, + 767 + ], + "page_idx": 53 + }, + { + "type": "image", + "img_path": "images/bb7daa0ab3745fb49912d1219615736ceb002d7d69fafd8c7a73819734f71a09.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 509, + 561, + 821, + 767 + ], + "page_idx": 53 + }, + { + "type": "page_number", + "text": "54", + "bbox": [ + 862, + 936, + 883, + 948 + ], + "page_idx": 53 + }, + { + "type": "image", + "img_path": "images/0dbc4689ed18c4fc12b7ae53b6420d0f6eb26f24706c74f033eb5e0918cf8339.jpg", + "image_caption": [ + "Figure S.15 | Breakdown of tool-usage per question in chemical preference dataset. Marker size represents usage count and corresponds to the number of uses per each tool; blue indicates accuracy increase, light red indicates decrease associated with each tool per question. We observe questions involve up to 8 tool calls. High usage of SMILES description and toxicity prediction correlates with improved performance. This demonstrates Agentic-Tx's adaptive tool selection to meet task requirements and improved performance." + ], + "image_footnote": [], + "bbox": [ + 117, + 92, + 883, + 256 + ], + "page_idx": 54 + }, + { + "type": "image", + "img_path": "images/a9c5a0cec4eba54bfa2c62a6203b620f76c3bf7a0500cf31babec23662e84c49.jpg", + "image_caption": [ + "Figure S.16 | Ability to combine SMILES and text is independent of model size. Median relative change of TxGemma-27B-Predict, TxGemma-9B-Predict and TxGemma-2B-Predict performance from SOTA for tasks grouped by feature type. The signs were reversed for MAE and MSE metrics because lower MAE and MSE values correspond to better performances. The number of tasks in each feature type is displayed over each bar. In all models, over $90\\%$ of tasks had a median relative performance change greater than -0.2, and SMILES + Text consistently outperformed SOTA." 
+ ], + "image_footnote": [], + "bbox": [ + 117, + 378, + 364, + 542 + ], + "page_idx": 54 + }, + { + "type": "image", + "img_path": "images/98955b38edb8949ab00f726cf43f2ccf404666fb5bdd25f71cd16900b64edab5.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 375, + 378, + 620, + 541 + ], + "page_idx": 54 + }, + { + "type": "image", + "img_path": "images/abcd5f2594648ea69cc611d92f1720d998830411dd4e40aca9dce191977179fb.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 635, + 378, + 880, + 542 + ], + "page_idx": 54 + }, + { + "type": "image", + "img_path": "images/53ef111b2c7457fddcdcb839e97bb4a0565136bb40ee6962ec4d103f9efc3a79.jpg", + "image_caption": [ + "Figure S.17 | Ablations of model sizes and model adaptations. (left) Relative performance changes for pairwise comparisons of TxGemma-Predict models (TxGemma-2B-Predict, TxGemma-9B-Predict, TxGemma-27B-Predict). (right) Relative performance changes of TxGemma models compared to their respective base models." + ], + "image_footnote": [], + "bbox": [ + 117, + 676, + 460, + 837 + ], + "page_idx": 54 + }, + { + "type": "image", + "img_path": "images/97a2ed49d9a6a34eb3566d17f60c2015ee4e18897569ccfbdda77a9076bdca00.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 535, + 676, + 880, + 835 + ], + "page_idx": 54 + }, + { + "type": "page_number", + "text": "55", + "bbox": [ + 862, + 936, + 883, + 948 + ], + "page_idx": 54 + }, + { + "type": "image", + "img_path": "images/5440136090b3e3dd0547d3116cd363c58ded13e0ceb7ef554d37f45cd179852c.jpg", + "image_caption": [ + "Figure S.18 | TxGemma predictions show correlations between toxicity and clinical trial approval. Spearman correlation coefficients between toxicity predictions (measured by AMES, DILI, and hERG central) and clinical trial predictions (measured by Phase1, Phase2, and Phase3) on a set of PubChem molecules." 
+ ], + "image_footnote": [], + "bbox": [ + 346, + 383, + 647, + 542 + ], + "page_idx": 55 + }, + { + "type": "page_number", + "text": "56", + "bbox": [ + 862, + 936, + 883, + 948 + ], + "page_idx": 55 + }, + { + "type": "text", + "text": "References", + "text_level": 1, + "bbox": [ + 120, + 90, + 228, + 104 + ], + "page_idx": 56 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "1. Chen, J., Hu, Y., Wang, Y., Lu, Y., Cao, X., Lin, M., Xu, H., Wu, J., Xiao, C., Sun, J., et al. TrialBench: Multi-modal artificial intelligence-ready clinical trial datasets. arXiv preprint arXiv:2407.00631 (2024).", + "2. Kuo, K.-T., Mao, T.-L., Jones, S., Veras, E., Ayhan, A., Wang, T.-L., Glas, R., Slamon, D., Velculescu, V. E., Kuman, R. J., et al. Frequent activating mutations of PIK3CA in ovarian clear cell carcinoma. The American journal of pathology 174, 1597-1601 (2009).", + "3. Leontiadou, H., Galdadas, I., Athanasiou, C. & Cournia, Z. Insights into the mechanism of the PIK3CA E545K activating mutation using MD simulations. Scientific reports 8, 15544 (2018).", + "4. Chen, H., Si, Y., Wen, J., Hu, C., Xia, E., Wang, Y. & Wang, O. P110α inhibitor alpelisib exhibits a synergistic effect with pyrotinib and reverses pyrotinib resistant in HER2+ breast cancer. Neoplasia 43, 100913 (2023).", + "5. Fritsch, C., Huang, A., Chatenay-Rivauday, C., Schnell, C., Reddy, A., Liu, M., Kauffmann, A., Guthy, D., Erdmann, D., De Pover, A., et al. Characterization of the novel and specific PI3Kα inhibitor NVP-BYL719 and development of the patient stratification strategy for clinical trials. Molecular cancer therapeutics 13, 1117-1129 (2014).", + "6. Narayan, P., Prowell, T. M., Gao, J. J., Fernandes, L. L., Li, E., Jiang, X., Qiu, J., Fan, J., Song, P., Yu, J., et al. FDA approval summary: alpelisib plus fulvestrant for patients with HR-positive, HER2-negative, PIK3CA-mutated, advanced or metastatic breast cancer. Clinical Cancer Research 27, 1842-1849 (2021).", + "7. 
Passarelli, A., Carbone, V., Pignata, S., Mazzeo, R., Lorusso, D., Scambia, G., Canova, S., Di Palma, T., Tasca, G., Mantiero, M., et al. Alpelisib for PIK3CA-mutated advanced gynecological cancers: first clues of clinical activity. *Gynecologic Oncology* 183, 61-67 (2024).", + "8. Thibault, B., Thole, A., D'Angelo, R., Basset, C. & Guillermet-Guibert, J. PI3Kα-specific inhibitor BYL-719 synergizes with cisplatin in vitro in PIK3CA-mutated ovarian cancer cells. Scientific Reports 15, 6265 (2025).", + "9. Hu, X., Xia, M., Wang, J., Yu, H., Chai, J., Zhang, Z., Sun, Y., Su, J. & Sun, L. Dual PI3K/mTOR inhibitor PKI-402 suppresses the growth of ovarian cancer cells by degradation of Mcl-1 through autophagy. Biomedicine & Pharmacotherapy 129, 110397 (2020).", + "10. Turon, G., Hlozek, J., Woodland, J. G., Kumar, A., Chibale, K. & Duran-Frigola, M. First fully-automated AI/ML virtual screening cascade implemented at a drug discovery centre in Africa. Nature Communications 14, 5736 (2023).", + "11. Fontenot, R., Kathad, U., McDermott, J., Sturtevant, D., Sharma, P. & Carr, P. Predicting a Compounds Blood-Brain-Barrier Permeability with Lantern Pharma's AI and ML Platform, RADR 2023.", + "12. Bera, S., Dent, J., Gill, G., Stolman, A. & Wu, B. SimGCN for TDC Benchmarks (2022).", + "13. Plonka, W., Stork, C., Šićho, M. & Kirchmair, J. CYPlebrity: Machine learning models for the prediction of inhibitors of cytochrome P450 enzymes. Bioorganic & medicinal chemistry 46, 116388 (2021).", + "14. Hu, W., Liu, B., Gomes, J., Zitnik, M., Liang, P., Pande, V. & Leskovec, J. Strategies for pre-training graph neural networks. arXiv preprint arXiv:1905.12265 (2019).", + "15. Huang, K., Fu, T., Glass, L. M., Zitnik, M., Xiao, C. & Sun, J. DeepPurpose: a deep learning library for drug-target interaction prediction. Bioinformatics 36, 5545-5547 (2020).", + "16. Lagunin, A., Filimonov, D., Zakharov, A., Xie, W., Huang, Y., Zhu, F., Shen, T., Yao, J. & Poroikov, V. 
Computer-aided prediction of rodent carcinogenicity by PASS and CISOC-PSCT. QSAR & Combinatorial Science 28, 806-810 (2009).", + "17. Li, P., Li, Y., Hsieh, C.-Y., Zhang, S., Liu, X., Liu, H., Song, S. & Yao, X. TrimNet: learning molecular representation from triplet messages for biomedicine. Briefings in Bioinformatics 22, bbaa266 (2021).", + "18. Huang, D., Chowdhuri, S. R., Li, A., Li, A., Agrawal, A., Gano, K. & Zhu, A. A Unified System for Molecular Property Predictions: Oloren ChemEngine and its Applications (2022).", + "19. Li, J., Cai, D. & He, X. Learning graph-level representation for drug discovery. arXiv preprint arXiv:1709.03741 (2017).", + "20. Raimondi, D., Simm, J., Arany, A. & Moreau, Y. A novel method for data fusion over entity-relation graphs and its application to protein-protein interaction prediction. Bioinformatics 37, 2275-2281 (2021).", + "21. Gfeller, D., Schmidt, J., Croce, G., Guillaume, P., Bobisse, S., Genolet, R., Queiroz, L., Cesbron, J., Racle, J. & Harari, A. Improved predictions of antigen presentation and TCR recognition with MixMHCpred2. 2 and PRIME2. 0 reveal potent SARS-CoV-2 CD8+ T-cell epitopes. Cell Systems 14, 72-83 (2023).", + "22. Motmaen, A., Dauparas, J., Baek, M., Abedi, M. H., Baker, D. & Bradley, P. Peptide-binding specificity prediction using fine-tuned protein structure prediction networks. Proceedings of the National Academy of Sciences 120, e2216697120 (2023).", + "23. Siramshetty, V., Williams, J., Nguyen, D., Neyra, J., Southall, N., Mathé, E., Xu, X. & Shah, P. Validating ADME QSAR models using marketed drugs. SLAS DISCOVERY: Advancing the Science of Drug Discovery 26, 1326-1336 (2021).", + "24. Haneczok, J. & Delijewski, M. Machine learning enabled identification of potential SARS-CoV-2 3CLpro inhibitors based on fixed molecular fingerprints and Graph-CNN neural representations. Journal of Biomedical Informatics 119, 103821 (2021).", + "25. Liu, Y., Wu, Y., Shen, X. & Xie, L. 
COVID-19 multi-targeted drug repurposing using few-shot learning. Frontiers in Bioinformatics 1, 693177 (2021).", + "26. Chen, X., Dougherty, T., Hong, C., Schibler, R., Zhao, Y. C., Sadeghi, R., Matasci, N., Wu, Y.-C. & Kerman, I. Predicting antibody developability from sequence using machine learning. *biorxiv*, 2020-06 (2020).", + "27. Alves, V. M., Muratov, E., Fourches, D., Strickland, J., Kleinstreuer, N., Andrade, C. H. & Tropsha, A. Predicting chemically-induced skin reactions. Part I: QSAR models of skin sensitization and their application to identify potentially hazardous compounds. Toxicology and applied pharmacology 284, 262-272 (2015).", + "28. Shermukhamedov, S., Mamurjonova, D. & Probst, M. Structure to Property: Chemical Element Embeddings and a Deep Learning Approach for Accurate Prediction of Chemical Properties. arXiv preprint arXiv:2309.09355 (2023)." + ], + "bbox": [ + 112, + 119, + 887, + 893 + ], + "page_idx": 56 + }, + { + "type": "page_number", + "text": "57", + "bbox": [ + 862, + 936, + 883, + 948 + ], + "page_idx": 56 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "29. Vu, O., Mendenhall, J., Altarawy, D. & Meiler, J. BCL.: Mol2D—a robust atom environment descriptor for QSAR modeling and lead optimization. Journal of computer-aided molecular design 33, 477–486 (2019).", + "30. Karim, A., Lee, M., Balle, T. & Sattar, A. CardioTox net: a robust predictor for hERG channel blockade based on deep learning meta-feature ensembles. Journal of Cheminformatics 13, 1-13 (2021).", + "31. Korotcov, A., Tkachenko, V., Russo, D. P. & Ekins, S. Comparison of deep learning with multiple machine learning methods and metrics using diverse drug discovery data sets. Molecular pharmaceutics 14, 4462-4475 (2017).", + "32. Wong, L., You, Z.-H., Guo, Z.-H., Yi, H.-C., Chen, Z.-H. & Cao, M.-Y. MIPDH: a novel computational model for predicting microRNA-mRNA interactions by DeepWalk on a heterogeneous network. 
ACS omega 5, 17022-17032 (2020).", + "33. Fu, T., Huang, K., Xiao, C., Glass, L. M. & Sun, J. Hint: Hierarchical interaction network for clinical-trial-outcome predictions. *Patterns* 3 (2022).", + "34. Weber, A., Born, J. & Rodriguez Martínez, M. TITAN: T-cell receptor specificity prediction with bimodal attention networks. Bioinformatics 37, i237-i244 (2021).", + "35. Lam, H. T., Sbodio, M. L., Galindo, M. M., Zayats, M., Fernandez-Diaz, R., Valls, V., Picco, G., Ramis, C. B. & Lopez, V. Otter-Knowledge: benchmarks of multimodal knowledge graph representation learning from different sources for drug discovery. arXiv preprint arXiv:2306.12802 (2023).", + "36. Kinnings, S. L., Liu, N., Tonge, P. J., Jackson, R. M., Xie, L. & Bourne, P. E. A machine learning-based method to improve docking scoring functions and its application to drug repurposing. Journal of chemical information and modeling 51, 408-419 (2011).", + "37. Kalemati, M., Zamani Emani, M. & Koohi, S. BiComp-DTA: Drug-target binding affinity prediction through complementary biological-related and compression-based featurization approach. PLOS Computational Biology 19, e1011036 (2023).", + "38. Wei, B. & Gong, X. DeepPLA: a novel deep learning-based model for protein-ligand binding affinity prediction (2021).", + "39. Probst, D., Schwaller, P. & Reymond, J.-L. Reaction classification and yield prediction using the differential reaction fingerprint DRFP. Digital discovery 1, 91-97 (2022).", + "40. Rivera, Z. A., Tayo, L., Chen, B.-Y. & Tsai, P.-W. In silico Evaluation of the Feasibility of Magnolia officinalis Electronshutting Compounds as Parkinson's Disease Remedy. Letters in Drug Design & Discovery 21, 3039-3048 (2024).", + "41. Pei, Q., Wu, L., Zhu, J., Xia, Y., Xie, S., Qin, T., Liu, H., Liu, T.-Y. & Yan, R. Breaking the barriers of data scarcity in drug-target affinity prediction. Briefings in Bioinformatics 24, bbad386 (2023).", + "42. 
Xia, F., Shukla, M., Brettin, T., Garcia-Cardona, C., Cohn, J., Allen, J. E., Maslov, S., Holbeck, S. L., Doroshow, J. H., Evrard, Y. A., et al. Predicting tumor cell line response to drug pairs with deep learning. BMC bioinformatics 19, 71-79 (2018).", + "43. Lind, A. P. & Anderson, P. C. Predicting drug activity against cancer cells by random forest models based on minimal genomic information and chemical properties. *PloS one* 14, e0219774 (2019).", + "44. Euclidia. https://github.com/euclidia/public-models. 2023.", + "45. Leenay, R. T., Aghazadeh, A., Hiatt, J., Tse, D., Roth, T. L., Apathy, R., Shifrut, E., Hultquist, J. F., Krogan, N., Wu, Z., et al. Large dataset enables prediction of repair after CRISPR-Cas9 editing in primary T cells. Nature biotechnology 37, 1034-1037 (2019).", + "46. Yang, K., Swanson, K., Jin, W., Coley, C., Eiden, P., Gao, H., Guzman-Perez, A., Hopper, T., Kelley, B., Mathea, M., et al. Analyzing learned molecular representations for property prediction. Journal of chemical information and modeling 59, 3370-3388 (2019).", + "47. Preuer, K., Lewis, R. P., Hochreiter, S., Bender, A., Bulusu, K. C. & Klambauer, G. DeepSynergy: predicting anti-cancer drug synergy with Deep Learning. Bioinformatics 34, 1538-1546 (2018).", + "48. Zheng, S., Rao, J., Zhang, Z., Xu, J. & Yang, Y. Predicting retrosynthetic reactions using self-corrected transformer neural networks. Journal of chemical information and modeling 60, 47-55 (2019).", + "49. Boral, N., Ghosh, P., Goswami, A. & Bhattacharyya, M. Accountable prediction of drug ADMET Properties with molecular descriptors. bioRxiv, 2022-06 (2022).", + "50. Hendrycks, D., Burns, C., Basart, S., Zou, A., Mazeika, M., Song, D. & Steinhardt, J. Measuring massive multitask language understanding. arXiv preprint arXiv:2009.03300 (2020)." 
+ ], + "bbox": [ + 109, + 90, + 887, + 679 + ], + "page_idx": 57 + }, + { + "type": "page_number", + "text": "58", + "bbox": [ + 862, + 936, + 883, + 948 + ], + "page_idx": 57 + } +] \ No newline at end of file diff --git a/data/2025/2504_06xxx/2504.06196/00c24729-2bee-4726-a0b4-13a163fe9cf3_model.json b/data/2025/2504_06xxx/2504.06196/00c24729-2bee-4726-a0b4-13a163fe9cf3_model.json new file mode 100644 index 0000000000000000000000000000000000000000..73c59d74c5c38ea97e7dbcf483309eb552670a07 --- /dev/null +++ b/data/2025/2504_06xxx/2504.06196/00c24729-2bee-4726-a0b4-13a163fe9cf3_model.json @@ -0,0 +1,8291 @@ +[ + [ + { + "type": "aside_text", + "bbox": [ + 0.023, + 0.277, + 0.06, + 0.702 + ], + "angle": 270, + "content": "arXiv:2504.06196v1 [cs.AI] 8 Apr 2025" + }, + { + "type": "title", + "bbox": [ + 0.394, + 0.085, + 0.571, + 0.112 + ], + "angle": 0, + "content": "TxGemma:" + }, + { + "type": "title", + "bbox": [ + 0.126, + 0.128, + 0.834, + 0.159 + ], + "angle": 0, + "content": "Efficient and Agentic LLMs for Therapeutics" + }, + { + "type": "text", + "bbox": [ + 0.154, + 0.176, + 0.807, + 0.216 + ], + "angle": 0, + "content": "Eric Wang*,†,1, Samuel Schmidgall*,1, Paul F. Jaeger1, Fan Zhang2, Rory Pilgrim2, Yossi Matias2, Joelle Barral1, David Fleet1 and Shekoofeh Azizi†,1" + }, + { + "type": "text", + "bbox": [ + 0.318, + 0.223, + 0.642, + 0.243 + ], + "angle": 0, + "content": "\\(^{1}\\)Google DeepMind, \\(^{2}\\)Google Research" + }, + { + "type": "text", + "bbox": [ + 0.144, + 0.285, + 0.845, + 0.603 + ], + "angle": 0, + "content": "Therapeutic development is a costly and high-risk endeavor that is often plagued by high failure rates. To address this, we introduce TxGemma, a suite of efficient, generalist large language models (LLMs) capable of therapeutic property prediction as well as interactive reasoning and explainability. 
Unlike task-specific models, TxGemma synthesizes information from diverse sources, enabling broad application across the therapeutic development pipeline. The suite includes 2B, 9B, and 27B parameter models, fine-tuned from Gemma-2 on a comprehensive dataset of small molecules, proteins, nucleic acids, diseases, and cell lines. Across 66 therapeutic development tasks, TxGemma achieved superior or comparable performance to the state-of-the-art generalist model on 64 (superior on 45), and against state-of-the-art specialist models on 50 (superior on 26). Fine-tuning TxGemma models on therapeutic downstream tasks, such as clinical trial adverse event prediction, requires less training data than fine-tuning base LLMs, making TxGemma suitable for data-limited applications. Beyond these predictive capabilities, TxGemma features conversational models that bridge the gap between general LLMs and specialized property predictors. These allow scientists to interact in natural language, provide mechanistic reasoning for predictions based on molecular structure, and engage in scientific discussions. Building on this, we further introduce Agentic-Tx, a generalist therapeutic agentic system powered by Gemini 2.5 that reasons, acts, manages diverse workflows, and acquires external domain knowledge. Agentic-Tx surpasses prior leading models on the Humanity's Last Exam benchmark (Chemistry & Biology) with \\(52.3\\%\\) relative improvement over o3-mini (high) and \\(26.7\\%\\) over o3-mini (high) on GPQA (Chemistry). On ChemBench, TxGemma excels with improvements of \\(6.3\\%\\) (ChemBench-Preference) and \\(2.4\\%\\) (ChemBench-Mini) over o3-mini (high), as well as \\(17.7\\%\\) and \\(5.6\\%\\) over o1, respectively. TxGemma's collection is released as open models, enabling researchers to adapt and validate it on their own diverse datasets, thus facilitating more challenging real-world applications." 
+ }, + { + "type": "title", + "bbox": [ + 0.112, + 0.627, + 0.262, + 0.644 + ], + "angle": 0, + "content": "1 Introduction" + }, + { + "type": "text", + "bbox": [ + 0.11, + 0.657, + 0.886, + 0.749 + ], + "angle": 0, + "content": "The pharmaceutical industry faces significant challenges in bringing new therapeutics to market. High attrition rates and lengthy, costly development timelines [3, 4] necessitate innovative approaches to therapeutic development. Success requires a drug candidate to not only demonstrate efficacy but also possess favorable safety, metabolic stability, pharmacokinetic/pharmacodynamic properties and developability, among other characteristics. Determining these diverse characteristics often relies on a large array of complex and expensive experimental procedures, highlighting the need for more efficient methods." + }, + { + "type": "text", + "bbox": [ + 0.11, + 0.754, + 0.888, + 0.862 + ], + "angle": 0, + "content": "Computational approaches, such as machine learning, are emerging as powerful tools to address these challenges. Leveraging predictive models trained on curated datasets allows researchers to prioritize promising candidates early in the development process, reducing reliance on costly experimental assays [5]. Publicly available databases of molecular properties and biological activity are crucial for training and validating these models. In this area, a major development was the curation of the Therapeutics Data Commons (TDC) [6, 7, 8], which contains datasets and benchmarks for many different tasks throughout the therapeutic development pipeline, ranging from early-stage target identification to late-stage clinical trial approval." + }, + { + "type": "text", + "bbox": [ + 0.11, + 0.866, + 0.888, + 0.913 + ], + "angle": 0, + "content": "Recent advancements in large language models (LLMs) offer a compelling opportunity to leverage available datasets and address limitations in the therapeutic development process. 
LLMs have demonstrated the capacity to integrate and learn from diverse data sources across various domains, including scientific applications [9, 10," + }, + { + "type": "page_footnote", + "bbox": [ + 0.111, + 0.935, + 0.252, + 0.947 + ], + "angle": 0, + "content": "* Equal contributions." + }, + { + "type": "page_footnote", + "bbox": [ + 0.111, + 0.947, + 0.483, + 0.962 + ], + "angle": 0, + "content": "† Corresponding authors: {shekazizi, ericzwang}@google.com" + }, + { + "type": "list", + "bbox": [ + 0.111, + 0.935, + 0.483, + 0.962 + ], + "angle": 0, + "content": null + } + ], + [ + { + "type": "image", + "bbox": [ + 0.115, + 0.086, + 0.885, + 0.371 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.117, + 0.386, + 0.482, + 0.513 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.117, + 0.538, + 0.482, + 0.668 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.119, + 0.676, + 0.476, + 0.726 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.517, + 0.388, + 0.88, + 0.476 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.518, + 0.48, + 0.88, + 0.559 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.518, + 0.563, + 0.882, + 0.643 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.518, + 0.647, + 0.884, + 0.726 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.11, + 0.737, + 0.887, + 0.935 + ], + "angle": 0, + "content": "Figure 1 | Overview of TxGemma. (top) All TxGemma variants are trained on diverse data sources of the Therapeutic Data Commons (TDC). TxGemma-Predict comes in three size variants (2B, 9B, and 27B) and is trained for high-performance predictions on a broad set of therapeutic development tasks. 
TxGemma-Chat features two variants (9B and 27B) and is trained on a combination of TDC data with general Gemma-2 instruction tuning data to retain conversational and reasoning capabilities. Agentic-Tx, a therapeutics-focused agentic system powered by Gemini 2.5, has access to 18 tools including TxGemma-Predict and TxGemma-Chat to collect external knowledge and manages complex tasks in either autonomous or interactive settings. (bottom-right) Absolute performance of Agentic-Tx compared to best-in-class models on three complex therapeutic-related reasoning benchmarks. The state-of-the-art (SOTA) values are obtained from [1, 2] and details are listed in Table 3. Dashed lines: L=lowest, M=mean, H=highest human scores. (bottom-left) Relative performance changes of TxGemma-Predict compared to the SOTA generalist model for each task type. The assignment of the 66 evaluated TDC tasks to task types is shown in Tables S.2 and S.3. The bottom bar chart shows a summary of results where TxGemma-Predict outperforms or nearly matches SOTA (light blue), and outperforms SOTA (darker blue)." + }, + { + "type": "page_number", + "bbox": [ + 0.872, + 0.938, + 0.885, + 0.949 + ], + "angle": 0, + "content": "2" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.11, + 0.09, + 0.887, + 0.181 + ], + "angle": 0, + "content": "11]. Their potential to connect disparate aspects of drug development, such as chemical structure, biological activity, and clinical trial outcomes, is particularly exciting. In this context, we have previously introduced Tx-LLM, a LLM fine-tuned from a collection of question-answer instruction-tuning datasets based on TDC [12]. While promising, the model's lack of conversational capabilities prevented reasoning or user interaction, limiting its value for scientists who require a model that can understand complex queries and engage in nuanced discussions." 
+ }, + { + "type": "text", + "bbox": [ + 0.11, + 0.187, + 0.887, + 0.295 + ], + "angle": 0, + "content": "In this work, we introduce TxGemma, a suite of efficient, generalist LLMs trained for therapeutics. Building on, but significantly extending, our previous work [12], TxGemma leverages LLMs to synthesize information from diverse sources. The suite includes 2B, 9B, and 27B parameter models, fine-tuned from Gemma-2 [13, 14] using a collection of therapeutic instruction-tuning datasets encompassing small molecules, proteins, nucleic acids, diseases, and cell lines. For the first time in therapeutic AI, TxGemma features conversational counterparts capable of reasoning and explanation, moving beyond black-box predictions to facilitate mechanistic understanding and scientific discussions. Our key contributions are as follows:" + }, + { + "type": "text", + "bbox": [ + 0.127, + 0.307, + 0.885, + 0.43 + ], + "angle": 0, + "content": "- Efficient Generalist Therapeutic LLMs: TxGemma represents a potential shift from task-specific AI to efficient generalist models in therapeutic development. These efficient LLMs (2B-27B parameters) offer a competitive alternative to specialized models, achieving strong performance across a broad range of predictive and generative tasks. Out of 66 therapeutic development tasks curated by TDC, TxGemma-Predict outperforms or nearly matches the state-of-the-art generalist model on 64 (outperforms on 45) and state-of-the-art specialist models on 50 (outperforms on 26). Additionally, fine-tuning TxGemma models on clinical trial adverse event prediction requires less data to achieve strong performance compared to base Gemma-2 models, an important advantage for data-limited fields." 
+ }, + { + "type": "text", + "bbox": [ + 0.127, + 0.436, + 0.885, + 0.498 + ], + "angle": 0, + "content": "- Explainable and Interactive Therapeutic Models: TxGemma-Chat introduces reasoning and explanation capabilities, bridging the gap between general LLMs and specialized property predictors. Scientists can interact with TxGemma-Chat using natural language, exploring complex questions, receive explanations for predictions (e.g., based on molecular structure), and engage in scientific discussions." + }, + { + "type": "text", + "bbox": [ + 0.127, + 0.505, + 0.885, + 0.582 + ], + "angle": 0, + "content": "- Agentic Orchestration of Therapeutic Development Workflows: We further introduce Agentic-Tx, a therapeutics-focused agentic system powered by Gemini 2.5, demonstrating how TxGemma models can be integrated as tools. Equipped with 18 tools, Agentic-Tx solves complex, multi-step problems, achieving state-of-the-art results on reasoning-intensive chemistry and biology benchmarks, including Humanity's Last Exam [15] and ChemBench [1]." + }, + { + "type": "text", + "bbox": [ + 0.127, + 0.588, + 0.885, + 0.665 + ], + "angle": 0, + "content": "- Enabling Innovative Research with Open Models: Understanding the prevalence of proprietary data in therapeutic research, we release TxGemma models trained only on datasets with commercial licenses as open models to empower researchers to adapt and refine them on their own data. This facilitates validation and potential performance improvements tailored to their specific research needs, paving the way for therapy safety and efficacy in more challenging real-world therapeutic applications." 
+ }, + { + "type": "list", + "bbox": [ + 0.127, + 0.307, + 0.885, + 0.665 + ], + "angle": 0, + "content": null + }, + { + "type": "title", + "bbox": [ + 0.111, + 0.682, + 0.224, + 0.697 + ], + "angle": 0, + "content": "2 Methods" + }, + { + "type": "title", + "bbox": [ + 0.111, + 0.715, + 0.194, + 0.729 + ], + "angle": 0, + "content": "2.1 Data" + }, + { + "type": "text", + "bbox": [ + 0.11, + 0.74, + 0.887, + 0.879 + ], + "angle": 0, + "content": "Therapeutic Data Commons (TDC) We leverage the Therapeutic Data Commons (TDC) [7, 6], a comprehensive collection of 66 AI-ready datasets spanning the drug discovery and development pipeline. TDC includes over 15 million datapoints across various biomedical entities and encompasses single-instance prediction, multi-instance prediction, and generation tasks [7]. We focus on TDC tasks relevant to drug discovery, incorporating diverse therapeutic representations: SMILES strings (small molecules), amino acid sequences (proteins and peptides, including specialized representations for MHC molecules and T-cell receptors), nucleotide sequences (nucleic acids), and natural language text (disease/cell line names) (see Table S.6 for examples). Many tasks combine multiple representations. (See Table S.1 for task inclusion criteria and Tables S.7 and S.8 for biological contexts of certain tasks.)" + }, + { + "type": "text", + "bbox": [ + 0.11, + 0.883, + 0.885, + 0.915 + ], + "angle": 0, + "content": "Therapeutic Instruction-Tuning Following Chaves et al. [12], we transform the raw TDC data into an instruction-tuning format suitable for LLMs. Each data point is formatted as a prompt:" + }, + { + "type": "page_number", + "bbox": [ + 0.871, + 0.938, + 0.885, + 0.949 + ], + "angle": 0, + "content": "3" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.136, + 0.091, + 0.443, + 0.104 + ], + "angle": 0, + "content": "- Instruction: Briefly describes the task." 
+ }, + { + "type": "text", + "bbox": [ + 0.136, + 0.105, + 0.884, + 0.133 + ], + "angle": 0, + "content": "- Context: Provides 2-3 sentences of relevant biochemical background, derived from TDC descriptions and literature." + }, + { + "type": "text", + "bbox": [ + 0.136, + 0.136, + 0.884, + 0.166 + ], + "angle": 0, + "content": "- Question: Queries a specific therapeutic property, incorporating textual representations of therapeutics and/or targets (e.g., \"Does the following molecule cross the blood-brain barrier? \")." + }, + { + "type": "text", + "bbox": [ + 0.136, + 0.167, + 0.884, + 0.195 + ], + "angle": 0, + "content": "- Answer: Formatted as (A)/(B) for binary classification, a binned continuous value for regression, or a SMILES string for generation." + }, + { + "type": "list", + "bbox": [ + 0.136, + 0.091, + 0.884, + 0.195 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.11, + 0.212, + 0.886, + 0.258 + ], + "angle": 0, + "content": "This process yields 7,080,338 training, 956,575 validation, and 1,917,297 test data points (Figure S.1, Tables S.2 and S.3). Data splits closely follow TDC's recommended methodologies (random, scaffold, cold-start, combination, temporal) (Table S.2, Table S.3). Detailed task descriptions are in Tables S.4 and S.5." + }, + { + "type": "text", + "bbox": [ + 0.11, + 0.263, + 0.886, + 0.34 + ], + "angle": 0, + "content": "We employ a few-shot prompting strategy to promote in-context learning [16], using a blend of \\(70\\%\\) zero-shot and \\(30\\%\\) few-shot prompts [17, 12]. For few-shot prompts, we randomly sample examples from the training set (Table S.9), as intra-training set similarity is higher than training-test set similarity (Figure S.2). The number of examples is uniformly selected between 1 and 10 so that few-shot prompting is robust to the number of examples during evaluation." 
+ }, + { + "type": "title", + "bbox": [ + 0.111, + 0.352, + 0.235, + 0.369 + ], + "angle": 0, + "content": "2.2 Modeling" + }, + { + "type": "text", + "bbox": [ + 0.11, + 0.378, + 0.886, + 0.456 + ], + "angle": 0, + "content": "Base LLM. TxGemma is built upon the Gemma-2 [14] family of lightweight, state-of-the-art open LLMs. Gemma-2 models utilize a decoder-only transformer architecture, incorporating architectural modifications such as interleaved local-global attention and group-query attention, and are trained using Gemini technology [18]. We utilize Gemma-2 base models at 2B, 9B, and 27B parameters. 2B and 9B Gemma-2 models were initially trained via knowledge distillation [14]." + }, + { + "type": "text", + "bbox": [ + 0.11, + 0.459, + 0.886, + 0.582 + ], + "angle": 0, + "content": "Predictive Model Fine-Tuning. We fine-tune the 2B, 9B, and 27B Gemma-2 base models on the therapeutic instruction-tuning data derived from TDC, creating TxGemma-2B-Predict, TxGemma-9B-Predict, and TxGemma-27B-Predict, respectively. Training was performed across all TDC tasks, with mixture ratios proportional to the number of training data points (see Tables S.2 and S.3 for data distribution). This encompassed all approximately 7 million training examples, comprising 3.3 million from regression/generation and 3.7 million from binary classification tasks. Fine-tuning proceeded for 67B tokens (12 epochs) using 256 TPUv4 chips with 8-way data replication, 4-way sequence sharding, and 4-way model sharding. In this work, \"TxGemma\" generally refers to the generalist, predictive TxGemma-27B-Predict." + }, + { + "type": "text", + "bbox": [ + 0.11, + 0.587, + 0.886, + 0.663 + ], + "angle": 0, + "content": "Conversational Model Fine-Tuning. We also trained conversational counterparts, TxGemma-9B-Chat and TxGemma-27B-Chat, by supplementing the therapeutic instruction-tuning data with general instruction-tuning data, as detailed in the Gemma-2 report [14]. 
The training data mixture comprised \\(30\\%\\) therapeutic data and \\(70\\%\\) general instruction-tuning data. Conversational models were trained using the same number of tokens and TPU configuration as the predictive models." + }, + { + "type": "title", + "bbox": [ + 0.111, + 0.675, + 0.462, + 0.692 + ], + "angle": 0, + "content": "2.3 Evaluating Predictive Performance" + }, + { + "type": "text", + "bbox": [ + 0.11, + 0.702, + 0.886, + 0.809 + ], + "angle": 0, + "content": "Prompting strategy For test set evaluations, we use 10-shot prompting, selecting exemplars from the nearest neighbors within the combined training and validation set (not the test set), as detailed in Table S.9. Nearest neighbors were determined using different methods based on molecule type. For small molecules, we used RDKit [19] to generate Morgan fingerprints (radius 2 and size 2048), representing molecular substructures as binary vectors. Subsequently, we used Chemfp [20] to compute Tanimoto similarities, which quantify fingerprint overlap. For amino acid and nucleotide sequences, nearest neighbors were defined by percent sequence identity, determined through multiple sequence alignments performed with Clustal Omega [21]." + }, + { + "type": "text", + "bbox": [ + 0.11, + 0.814, + 0.886, + 0.906 + ], + "angle": 0, + "content": "Performance Metrics and Statistical Tests We assess performance using the preferred metrics for each task, as defined by TDC [7] and used by previous models. Binary classification tasks are assessed with area under the receiver operating characteristic curve (AUROC), area under the precision-recall curve (AUPRC), and accuracy. Regression tasks use Spearman's and Pearson correlation coefficients, mean absolute error (MAE), and mean squared error (MSE). The USPTO generation task uses \"set accuracy,\" scoring 1 for perfect overlap between predicted and true reactant sets, and 0 otherwise. 
Bootstrapped metrics are calculated" + }, + { + "type": "page_number", + "bbox": [ + 0.871, + 0.938, + 0.885, + 0.949 + ], + "angle": 0, + "content": "4" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.156, + 0.1, + 0.468, + 0.134 + ], + "angle": 0, + "content": "Prompt: Imagine an early virtual screening campaign setting. Which of the following two candidates would you prefer for further development?" + }, + { + "type": "text", + "bbox": [ + 0.158, + 0.146, + 0.265, + 0.156 + ], + "angle": 0, + "content": "" + }, + { + "type": "text", + "bbox": [ + 0.158, + 0.162, + 0.267, + 0.171 + ], + "angle": 0, + "content": "" + }, + { + "type": "text", + "bbox": [ + 0.537, + 0.101, + 0.794, + 0.112 + ], + "angle": 0, + "content": "Agent \\(\\rightarrow\\) TxGemma-ClinTox: Is the following toxic?" + }, + { + "type": "text", + "bbox": [ + 0.538, + 0.113, + 0.646, + 0.122 + ], + "angle": 0, + "content": "" + }, + { + "type": "text", + "bbox": [ + 0.538, + 0.124, + 0.666, + 0.134 + ], + "angle": 0, + "content": "TxGemma ClinTox: Toxic" + }, + { + "type": "text", + "bbox": [ + 0.537, + 0.145, + 0.796, + 0.155 + ], + "angle": 0, + "content": "Agent \\(\\rightarrow\\) TxGemma-ClinTox: Is the following toxic?" + }, + { + "type": "text", + "bbox": [ + 0.538, + 0.156, + 0.646, + 0.166 + ], + "angle": 0, + "content": "" + }, + { + "type": "text", + "bbox": [ + 0.538, + 0.168, + 0.691, + 0.177 + ], + "angle": 0, + "content": "TxGemma ClinTox: Non-toxic" + }, + { + "type": "image", + "bbox": [ + 0.165, + 0.19, + 0.834, + 0.35 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.156, + 0.362, + 0.468, + 0.393 + ], + "angle": 0, + "content": "Agent \\(\\rightarrow\\) TxGemma-Chat: Given two drug candidates, what factors would influence your decision to prioritize one over the other in an early virtual screening campaign?" 
+ }, + { + "type": "text", + "bbox": [ + 0.156, + 0.398, + 0.461, + 0.42 + ], + "angle": 0, + "content": "TxGemma-Chat: Investigate whether the drugs would pass through a clinical trial, based on properties such as toxicity." + }, + { + "type": "text", + "bbox": [ + 0.614, + 0.385, + 0.829, + 0.396 + ], + "angle": 0, + "content": "Agent: Final answer: " + }, + { + "type": "text", + "bbox": [ + 0.15, + 0.444, + 0.307, + 0.458 + ], + "angle": 0, + "content": "Natural Language Input" + }, + { + "type": "text", + "bbox": [ + 0.356, + 0.444, + 0.464, + 0.456 + ], + "angle": 0, + "content": "Reason + Action" + }, + { + "type": "text", + "bbox": [ + 0.565, + 0.444, + 0.626, + 0.455 + ], + "angle": 0, + "content": "Tool-use" + }, + { + "type": "text", + "bbox": [ + 0.728, + 0.444, + 0.814, + 0.455 + ], + "angle": 0, + "content": "Final Answer" + }, + { + "type": "image_caption", + "bbox": [ + 0.11, + 0.482, + 0.887, + 0.603 + ], + "angle": 0, + "content": "Figure 2 | Example workflow of agentic planning and execution with Agentic-Tx. Agentic-Tx uses the ReAct framework [22] to interleave thought with tool-usage. When a user poses a query, Agentic-Tx checks whether the query structure matches any defined tool trigger. If so, the query is routed to the corresponding tool, which (i) parses the request, (ii) invokes specialized logic, and (iii) returns a structured answer to the agent. The agent then composes a user-facing response. This adaptive tool-use mechanism is especially helpful for tasks that require external references, chemical data transformations, or precise chemical information, areas where self-contained LLMs often hallucinate. In the displayed example, Agentic-Tx uses two tools to solve a complex therapeutic task: TxGemma-Chat and the clinical toxicity prediction tool based on TxGemma-Predict." + }, + { + "type": "text", + "bbox": [ + 0.11, + 0.638, + 0.885, + 0.67 + ], + "angle": 0, + "content": "using 1000 samples. 
To compare overall performance between two models across all TDC tasks, we use the non-parametric Wilcoxon signed-rank test and report the corresponding p-value (details in Appendix C.1)." + }, + { + "type": "title", + "bbox": [ + 0.111, + 0.681, + 0.29, + 0.697 + ], + "angle": 0, + "content": "2.4 Agentic System" + }, + { + "type": "text", + "bbox": [ + 0.11, + 0.707, + 0.886, + 0.799 + ], + "angle": 0, + "content": "One limitation of LLMs for discovery is that, while their prediction capabilities are powerful, they do not have access to up-to-date external knowledge, such as research articles or domain-specific prediction models. These knowledge cut-offs prevent the model from answering questions outside of its training scope. Additionally, some questions involve multiple reasoning steps to solve, for example, the question \"What structural modifications could improve the potency of the given drug?\" requires iteratively searching the drug's structural space and then prompting TxGemma to predict potency." + }, + { + "type": "text", + "bbox": [ + 0.11, + 0.804, + 0.888, + 0.85 + ], + "angle": 0, + "content": "Agentic-Tx, our therapeutics-focused agentic system powered by Gemini 2.5 [18], extends TxGemma's capabilities by orchestrating such complex workflows. Agentic-Tx employs a modular, tool-usage paradigm, in contrast to TxGemma's direct generation of solutions." + }, + { + "type": "text", + "bbox": [ + 0.11, + 0.855, + 0.886, + 0.901 + ], + "angle": 0, + "content": "Reasoning and Action Framework Agentic-Tx utilizes the ReAct framework [22], allowing it to interleave reasoning steps (\"thoughts\") with actions (tool use). The agentic system receives a task or question and iteratively takes actions based on its current context. 
Each action typically involves using a tool, which" + }, + { + "type": "page_number", + "bbox": [ + 0.871, + 0.938, + 0.885, + 0.949 + ], + "angle": 0, + "content": "5" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.11, + 0.091, + 0.887, + 0.183 + ], + "angle": 0, + "content": "returns an observation. Key to ReAct is this iterative process of observing, reasoning, and acting, allowing Agentic-Tx to dynamically adjust its approach based on the information it gathers. Because tools may return large outputs, we summarize these observations in order to maintain a concise and relevant context. This iterative process of observing, reasoning, acting, and updating its context allows Agentic-Tx to dynamically adjust its approach and gather the necessary information to answer the initial query. Finally, Agentic-Tx integrates the gathered information and formulates a user-friendly response." + }, + { + "type": "text", + "bbox": [ + 0.11, + 0.187, + 0.887, + 0.218 + ], + "angle": 0, + "content": "Agentic Tools Agentic-Tx is equipped with 18 tools across four categories (detailed tool descriptions are in Table S.12). They can be broadly categorized as:" + }, + { + "type": "text", + "bbox": [ + 0.131, + 0.233, + 0.884, + 0.308 + ], + "angle": 0, + "content": "1. TxGemma-based Tools: These provide access to TxGemma's capabilities. The Chat tool enables interaction with TxGemma-27B-Chat. The ClinicalTox and ToxCast tools utilize TxGemma-27B-Predict for toxicity predictions. \\( IC_{50} \\) returns the predicted normalized \\( IC_{50} \\) between a drug and protein, the Mutagenicity tool predicts drug mutagenicity, and the Phase1 Trial tool predicts whether a drug would pass a Phase 1 clinical trial." + }, + { + "type": "text", + "bbox": [ + 0.13, + 0.31, + 0.884, + 0.338 + ], + "angle": 0, + "content": "2. General Tools: These query external knowledge resources, including PubMed, Wikipedia, and the web." 
+ }, + { + "type": "text", + "bbox": [ + 0.13, + 0.34, + 0.884, + 0.37 + ], + "angle": 0, + "content": "3. Molecule Tools: These leverage domain-specific libraries for tasks such as retrieving molecular descriptors (e.g., from PubChem) and performing chemical structure conversions." + }, + { + "type": "text", + "bbox": [ + 0.131, + 0.371, + 0.885, + 0.4 + ], + "angle": 0, + "content": "4. Gene & Protein Tools: These leverage domain-specific libraries for tasks involving genes or proteins, such as retrieving gene descriptions and protein descriptions (e.g., from the NCBI Gene database)." + }, + { + "type": "list", + "bbox": [ + 0.13, + 0.233, + 0.885, + 0.4 + ], + "angle": 0, + "content": null + }, + { + "type": "title", + "bbox": [ + 0.111, + 0.42, + 0.211, + 0.435 + ], + "angle": 0, + "content": "3 Results" + }, + { + "type": "title", + "bbox": [ + 0.111, + 0.453, + 0.46, + 0.468 + ], + "angle": 0, + "content": "3.1 TxGemma Predictive Performance" + }, + { + "type": "title", + "bbox": [ + 0.111, + 0.481, + 0.573, + 0.496 + ], + "angle": 0, + "content": "3.1.1 Comparison with best-in-class therapeutic models" + }, + { + "type": "text", + "bbox": [ + 0.11, + 0.507, + 0.888, + 0.675 + ], + "angle": 0, + "content": "To provide a comprehensive evaluation of our models' predictive capabilities, we benchmark against both specialist and generalist baselines. For specialist comparisons, we define best-in-class performance metrics for each task using previous models. Specifically, we utilize TDC leaderboard scores for tasks where available (ADMET, DrugCombo, DTI DG). For remaining tasks, values are reported from a literature review and are detailed in Tables S.13 and S.14. These specialist performance values align with those reported in Chaves et al. [12]. Additionally, we compare our models against three prominent therapeutic generalist and multi-task models: Tx-LLM [12], LlaSMol [23], and MolE [24]. 
Tx-LLM, with its two size-variants S and M, shares similar training data to our approach enabling a direct comparison across all tasks. LlaSMol a suite of generalist models built upon fine-tuned open-source LLMs trained for small-molecule applications [23]. Similarly, MolE was developed as a graph-based multi-task foundation model for small molecules. LlaSMol and MolE, specialized for small molecules, offer strong baselines for small molecule tasks." + }, + { + "type": "text", + "bbox": [ + 0.11, + 0.679, + 0.888, + 0.802 + ], + "angle": 0, + "content": "TxGemma shows improved performance compared to therapeutic generalist models In Figure 3, we compare the performance of TxGemma-27B-Predict to the two existing models in the Tx-LLM [12] family, Tx-LLM M and Tx-LLM S, built over PaLM-2 on TDC tasks. TxGemma-27B-Predict surpasses Tx-LLM M on 45 out of 66 tasks, while underperforming on 21. In addition, it outperforms Tx-LLM S on 62 and underperforms Tx-LLM S on only 4. Aggregating performance over task, we observe a statistically significant improvement of TxGemma-27B-Predict over Tx-LLM models \\((p = 0.003\\), Wilcoxon signed-rank test). These results demonstrate that TxGemma provides a highly competitive alternative to its predecessor with improved functionality at a substantially reduced model size." + }, + { + "type": "text", + "bbox": [ + 0.11, + 0.807, + 0.888, + 0.913 + ], + "angle": 0, + "content": "TxGemma is competitive with specialist therapeutic models Figure 4 and Figure S.4 compare TxGemma's performance with best-in-class specialist model across tasks containing various combinations of SMILES, amino acid, nucleotide, and text inputs. In a comparison with specialist best-in-class models, TxGemma-27B-Predict outperforms the state-of-the-art (SOTA) on 26 and performs near SOTA on 50. This is a substantial improvement over its predecessor Tx-LLM M, which outperformed SOTA on 22 tasks and near SOTA on 43. 
These results demonstrate the improved capabilities of TxGemma-27B-Predict and its competitiveness with current specialist models designed for specific tasks and therapeutic feature types." + }, + { + "type": "page_number", + "bbox": [ + 0.872, + 0.938, + 0.885, + 0.949 + ], + "angle": 0, + "content": "6" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.115, + 0.088, + 0.885, + 0.499 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.11, + 0.522, + 0.889, + 0.645 + ], + "angle": 0, + "content": "Figure 3 | Comparison of TxGemma-Predict's performance with therapeutic generalist models. (top) relative performance improvement of TxGemma-27B-Predict in comparison to Tx-LLM S. TxGemma-27B-Predict outperforms Tx-LLM S on 62 and underperforms on only 4. (bottom) relative performance improvement of TxGemma-27B-Predict in comparison to Tx-LLM M. TxGemma-27B-Predict outperforms Tx-LLM M on 45 out of 66 tasks, while underperforming on 21. When aggregating performance over task, we observe a net improvement of TxGemma-27B-Predict over Tx-LLM models, with a statistically significant difference \\((p = 0.003\\), Wilcoxon signed-rank test). These results establish TxGemma-27B-Predict as a competitive and functionally enhanced alternative at practical model sizes. Values for each task can be found in Tables S.15 and S.16." + }, + { + "type": "text", + "bbox": [ + 0.11, + 0.677, + 0.889, + 0.834 + ], + "angle": 0, + "content": "TxGemma performs similarly to multi-task models specialized for small molecules Table 1 and Figure S.6 compare the predictive performance of TxGemma-27B-Predict with MolE, a graph-based multi-task foundation model for small molecules. MolE performs within the \\(95\\%\\) CIs of TxGemma-27B-Predict for 15 out of 22 tasks. 
Furthermore, both TxGemma-27B-Predict and TxGemma-9B-Predict outperform LlaSMolMistral (7B), the top performing model from the LlaSMol suite, on 2 of 5 shared tasks and within \\(95\\%\\) CIs on 2 additional tasks (Table 2 and Figure S.5). All metrics for MolE and LlaSMol are reported from Mendez-Lucio et al. [24] and Yu et al. [23]. Given their specialization in small-molecule tasks, LlaSMol and MolE provide strong baselines for evaluating generalist models. Notably, TxGemma, a generalist model encompassing diverse drug types and many different tasks, achieves competitive performance with these dedicated models designed for a narrower range of small-molecule tasks." + }, + { + "type": "title", + "bbox": [ + 0.111, + 0.842, + 0.495, + 0.86 + ], + "angle": 0, + "content": "3.2 TxGemma Conversational Capabilities" + }, + { + "type": "text", + "bbox": [ + 0.11, + 0.869, + 0.886, + 0.901 + ], + "angle": 0, + "content": "While TxGemma-27B-Predict performs well on prediction tasks, training solely on instruction tuning data for therapeutic properties limits its conversational capacity. 
TxGemma-27B-Predict can engage in general" + }, + { + "type": "page_number", + "bbox": [ + 0.871, + 0.938, + 0.885, + 0.949 + ], + "angle": 0, + "content": "7" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.159, + 0.089, + 0.382, + 0.259 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.389, + 0.089, + 0.614, + 0.259 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.617, + 0.09, + 0.842, + 0.259 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.158, + 0.264, + 0.382, + 0.434 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.389, + 0.265, + 0.613, + 0.433 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.617, + 0.265, + 0.842, + 0.434 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.151, + 0.44, + 0.379, + 0.609 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.38, + 0.44, + 0.607, + 0.609 + ], + "angle": 0, + "content": null + }, + { + "type": "image_footnote", + "bbox": [ + 0.654, + 0.474, + 0.728, + 0.485 + ], + "angle": 0, + "content": "SMILES" + }, + { + "type": "image_footnote", + "bbox": [ + 0.655, + 0.487, + 0.749, + 0.497 + ], + "angle": 0, + "content": "Amino acid" + }, + { + "type": "image_footnote", + "bbox": [ + 0.655, + 0.5, + 0.767, + 0.51 + ], + "angle": 0, + "content": "SMILES + Text" + }, + { + "type": "image_footnote", + "bbox": [ + 0.655, + 0.513, + 0.826, + 0.523 + ], + "angle": 0, + "content": "Nucleotide + Amino acid" + }, + { + "type": "image_footnote", + "bbox": [ + 0.655, + 0.526, + 0.79, + 0.536 + ], + "angle": 0, + "content": "Amino acid + Text" + }, + { + "type": "image_footnote", + "bbox": [ + 0.655, + 0.538, + 0.807, + 0.549 + ], + "angle": 0, + "content": "Amino acid + SMILES" + }, + { + "type": "image_footnote", + "bbox": [ + 0.655, + 0.552, + 0.746, + 0.562 + ], + "angle": 0, + "content": "Nucleotide" + }, + { + 
"type": "list", + "bbox": [ + 0.654, + 0.474, + 0.826, + 0.562 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.11, + 0.626, + 0.888, + 0.765 + ], + "angle": 0, + "content": "Figure 4 | Comparison of TxGemma's performance with best-in-class specialist models. TxGemma-27B-Predict is evaluated on each task in TDC and compared to the corresponding best-in-class competitor. The panels depict different metrics used to evaluate the tasks. Tasks are colored by their feature types including one or a combination of SMILE, Amino acid, Nucleotide and text as indicated in the legend. Marker sizes illustrate the number of data points in the task on a log scale. The larger shaded area in blue indicates where TxGemma outperforms best-in-class models, while the narrower light blue shaded area indicates where TxGemma is performing near best-in-class model (defined as within \\(10\\%\\)). MAE and MSE values are log-transformed since the magnitudes of these values depend on the units of outputs. Generation accuracy is the fraction of correct SMILES strings in the USPTO generation task. Values for each task can also be found in Tables S.13 and S.14." + }, + { + "type": "text", + "bbox": [ + 0.11, + 0.797, + 0.888, + 0.906 + ], + "angle": 0, + "content": "conversation, but its performance deteriorates when prompts deviate from the expected format. Figure S.9 shows an example of such decline in TxGemma-27B-Predict's conversational capabilities. To expand the TxGemma family's capabilities and provide a more versatile tool with the ability to explain its reasoning, we trained TxGemma-Chat with a mix of therapeutic and general instruction-tuning data as detailed in Section 2.2. We evaluate these new conversational capabilities through a combination of standard LLM benchmarks and qualitative examples. We also run our models through assurance evaluations, as done for Gemma-3 [25], to verify that TxGemma models adhere to safety policies." 
+ }, + { + "type": "page_number", + "bbox": [ + 0.872, + 0.938, + 0.885, + 0.949 + ], + "angle": 0, + "content": "8" + } + ], + [ + { + "type": "table_caption", + "bbox": [ + 0.11, + 0.097, + 0.888, + 0.173 + ], + "angle": 0, + "content": "Table 1 | Comparative performance of TxGemma and MolE on small molecule tasks. Details of the predictive performance of TxGemma-27B-Predict and MolE, a graph-based molecular multi-task foundation model, across various pharmacokinetics and toxicity tasks. Bold values indicate the best performance for each task. Metrics for MolE are reported from Mendez-Lucio et al. [24]. TxGemma-27B-Predict values are bootstrapped averages and \\(95\\%\\) CIs. These pharmacokinetics and toxicity tasks are publicly available in TDC [7]." + }, + { + "type": "table", + "bbox": [ + 0.12, + 0.18, + 0.885, + 0.57 + ], + "angle": 0, + "content": "
Task TypeTaskMetricMolE [24]TxGemma-27B-Predict
PharmacokineticsCaco2 WangMAE (↓)0.3290.401 (0.358-0.449)
Lipophilicity AstraZenecaMAE (↓)0.4060.538 (0.507-0.570)
Solubility AqSolDBMAE (↓)0.7760.907 (0.870-0.948)
PPBR AZMAE (↓)7.2299.048 (8.141-10.111)
HIA HouAUROC (↑)0.9840.988 (0.972-0.999)
Pgp BroccatelliAUROC (↑)0.9300.937 (0.904-0.964)
Bioavailability MaAUROC (↑)0.6400.694 (0.575-0.801)
BBB MartinsAUROC (↑)0.9030.908 (0.872-0.938)
CYP3A4 Substrate CarbonMangelsAUROC (↑)0.6920.691 (0.601-0.784)
CYP2D6 VeithAUPRC (↑)0.6790.683 (0.639-0.726)
CYP3A4 VeithAUPRC (↑)0.8760.854 (0.836-0.872)
CYP2C9 VeithAUPRC (↑)0.7820.798 (0.767-0.826)
CYP2D6 Substrate CarbonMangelsAUPRC (↑)0.6920.711 (0.570-0.830)
CYP2C9 Substrate CarbonMangelsAUPRC (↑)0.4090.438 (0.302-0.576)
VDss LombardoSpearman (↑)0.6440.559 (0.457-0.655)
Half Life ObachSpearman (↑)0.5780.458 (0.306-0.594)
Clearance Microsome AZSpearman (↑)0.6320.462 (0.353-0.565)
Clearance Hepatocyte AZSpearman (↑)0.4560.260 (0.129-0.384)
ToxicityLD50 ZhuMAE (↓)0.6020.627 (0.597-0.660)
hERGAUROC (↑)0.8350.885 (0.813-0.946)
AMESAUROC (↑)0.8340.816 (0.795-0.838)
DILIAUROC (↑)0.8520.886 (0.810-0.947)
" + }, + { + "type": "table_caption", + "bbox": [ + 0.11, + 0.591, + 0.889, + 0.667 + ], + "angle": 0, + "content": "Table 2 | Comparative performance of TxGemma and LlaSMol on small molecule tasks. Comparison of TxGemma-27B-Predict with LlaSMolMistral (best LlaSMol model at 7B) across shared small-molecule tasks. Bold values indicate the best performance for each task. Metrics for LlaSMolMistral are reported from Yu et al. [23]. TxGemma-Predict values are bootstrapped averages and \\(95\\%\\) CIs. These pharmacokinetics, toxicity, and high-throughput screening data and tasks are publicly available in TDC [7]" + }, + { + "type": "table", + "bbox": [ + 0.113, + 0.673, + 0.891, + 0.799 + ], + "angle": 0, + "content": "
Task TypeTaskMetricLlaSMolMistral [23]TxGemma-27B-PredictTxGemma-9B-Predict
PharmacokineticsBBBP†Accuracy (↑)0.7460.869 (0.835-0.901)0.847 (0.813-0.881)
ESOL†RMSE (↓)1.1501.250 (1.185-1.321)1.360 (1.246-1.480)
Lipo†RMSE (↓)1.0100.710 (0.668-0.752)0.742 (0.700-0.787)
ToxicityClintoxAccuracy (↑)0.9310.926 (0.896-0.956)0.925 (0.892-0.953)
High-throughput screeningHIV*Accuracy (↑)0.9670.968 (0.964-0.972)0.965 (0.961-0.969)
" + }, + { + "type": "table_footnote", + "bbox": [ + 0.111, + 0.804, + 0.454, + 0.816 + ], + "angle": 0, + "content": "* To predict whether compounds have anti-HIV properties." + }, + { + "type": "table_footnote", + "bbox": [ + 0.111, + 0.816, + 0.526, + 0.829 + ], + "angle": 0, + "content": "† Task name is modified to match the nomenclature from Yu et al. [23]." + }, + { + "type": "list", + "bbox": [ + 0.111, + 0.804, + 0.526, + 0.829 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.11, + 0.853, + 0.889, + 0.915 + ], + "angle": 0, + "content": "TxGemma-Chat bridges the gap between property predictors and general language models To assess the performance of TxGemma-Chat as a general conversational LLM, we evaluated it on the Massive Multitask Language Understanding (MMLU) [26] benchmark, a comprehensive suite of 57 diverse tasks spanning mathematics, history, computer science, law, etc. This benchmark evaluates knowledge, reasoning," + }, + { + "type": "page_number", + "bbox": [ + 0.871, + 0.938, + 0.885, + 0.949 + ], + "angle": 0, + "content": "9" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.12, + 0.09, + 0.488, + 0.251 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.511, + 0.091, + 0.885, + 0.251 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.11, + 0.268, + 0.889, + 0.374 + ], + "angle": 0, + "content": "Figure 5 | TxGemma-Chat bridges the gap between property predictors and general LLMs. Each point represents a therapeutic task in the TDC. The figure depicts relative predictive performance changes of TxGemma-Chat in comparison to TxGemma-Predict (top) and Gemma-2 (bottom) for 9B variants left and 27B variants in right. As expected, TxGemma-27B-Predict outperforms TxGemma-27B-Chat on therapeutic tasks, with TxGemma-27B-Chat showing a \\(10.69\\%\\) median relative performance reduction. 
However, TxGemma-27B-Chat exceeds the Gemma-2-27B baseline by \\(29.67\\%\\) on TDC therapeutic tasks. Similarly, TxGemma-9B-Chat's performance is \\(10.32\\%\\) lower than TxGemma-9B-Predict's. Values for each task can be found in Tables S.15 and S.16." + }, + { + "type": "text", + "bbox": [ + 0.11, + 0.408, + 0.884, + 0.47 + ], + "angle": 0, + "content": "and problem-solving abilities across a wide range of academic subjects, providing a measure of overall language understanding. It comprises 14,079 multiple-choice questions, each with four possible answers. For this multiple-choice format, we took the model's prediction as the option with the highest log-likelihood in a zero-shot setting and report overall accuracy as well as per-subject accuracy." + }, + { + "type": "text", + "bbox": [ + 0.11, + 0.474, + 0.889, + 0.583 + ], + "angle": 0, + "content": "Figure S.7 compares the performance of TxGemma-27B-Chat, TxGemma-27B-Predict, and Gemma-2-27B on MMLU, a standard benchmark for evaluating general LLMs. TxGemma-27B-Chat achieves an accuracy of \\(73.87\\%\\), slightly lower than Gemma-2-27B's \\(75.38\\%\\), but TxGemma-27B-Chat shows slight improvements in areas such as medical genetics, high school statistics, and college chemistry. Furthermore, TxGemma-27B-Chat significantly outperforms TxGemma-27B-Predict, which has an accuracy of \\(53.60\\%\\). This suggests that while fine-tuning solely on therapeutic data can diminish general knowledge acquired during pre-training, incorporating general instruction-tuning data can mitigate this effect." + }, + { + "type": "text", + "bbox": [ + 0.11, + 0.587, + 0.889, + 0.725 + ], + "angle": 0, + "content": "Furthermore, we assess TxGemma-27B-Chat on all therapeutic tasks within TDC. Figure 5 compares the relative performance changes of TxGemma-27B-Chat to TxGemma-27B-Predict and Gemma-2-27B for both 9B and 27B variants across these tasks. 
As anticipated, TxGemma-27B-Predict outperforms TxGemma-27B-Chat on these predictive tasks, with a median relative performance reduction of \\(11\\%\\) observed for TxGemma-27B-Chat. Nevertheless, TxGemma-27B-Chat surpasses the baseline Gemma-2-27B, demonstrating a median relative improvement of \\(30\\%\\). Similarly, TxGemma-9B-Chat shows a \\(10\\%\\) median relative performance reduction compared to TxGemma-9B-Predict. Regression tasks experience the greatest performance decline from the general-purpose training. These results demonstrate how TxGemma-Chat bridges the gap between therapeutic property predictors and general LLMs, functioning as a unified model for both capabilities." + }, + { + "type": "text", + "bbox": [ + 0.11, + 0.729, + 0.889, + 0.868 + ], + "angle": 0, + "content": "TxGemma-Chat can provide reasoning for complex tasks. A particularly compelling application of conversational models lies in prompting them to explain their predictions to users. While general LLMs may possess some foundational knowledge concerning therapeutic challenges, they are not accurate for property prediction (Figure 5). In Figure 6, we prompt TxGemma-27B-Chat to answer a question regarding blood-brain barrier permeability using the BBB Martins prompt format. TxGemma-27B-Chat provides only the answer in the initial turn, but when given a subsequent prompt to articulate its rationale, the model provides mechanistic reasoning for its answer based on molecular solubility and the structure of the input molecule derived from the SMILES string. All of this reasoning occurred directly within the model weights, without requiring any preprocessing of the SMILES string." + }, + { + "type": "text", + "bbox": [ + 0.11, + 0.871, + 0.889, + 0.903 + ], + "angle": 0, + "content": "Interestingly, prompting structures enable TxGemma-Chat to provide additional reasoning on complex tasks. 
For instance, while the relationship between blood-brain barrier permeability and lipophilicity is intuitive, some" + }, + { + "type": "page_number", + "bbox": [ + 0.863, + 0.938, + 0.886, + 0.95 + ], + "angle": 0, + "content": "10" + } + ], + [ + { + "type": "table_caption", + "bbox": [ + 0.11, + 0.097, + 0.888, + 0.126 + ], + "angle": 0, + "content": "Table 3 | Performance of Agentic-Tx. Accuracy of Agentic-Tx compared with SOTA models on ChemBench, GPQA, and HLE benchmarks." + }, + { + "type": "table", + "bbox": [ + 0.145, + 0.133, + 0.859, + 0.398 + ], + "angle": 0, + "content": "
ModelChemBenchGPQA (Diamond)Humanity's Last Exam
MiniPreferenceChemistryChemistry & Biology
Agentic-Tx (Gemini 2.5-Pro)84.566.281.720.1
Agentic-Tx (Gemini 2.0-Pro)83.465.562.414.5
Agentic-Tx (Gemini 1.5-Pro)80.665.051.811.9
Claude-3.5 (Sonnet)73.0*60.0*†40.4-
GPT-4o72.0*59.0*43.8**3.8
Gemini 2.5-pro82.865.579.517.9
Gemini 2.0-pro79.658.453.311.1
Gemini 1.5-pro74.955.648.210.6
PaperQA2 [28]67.0*56.0*--
o180.0*56.0*64.7**12.3
o3-mini (medium)82.461.362.513.0
o3-mini (high)82.562.064.513.2
Human Expert (Average Performance)27.0---
" + }, + { + "type": "table_footnote", + "bbox": [ + 0.151, + 0.403, + 0.625, + 0.418 + ], + "angle": 0, + "content": "\\((\\dagger)\\) Using ReAct framework, \\((^{*})\\) Extracted from [1], \\((^{**})\\) Extracted from [2]" + }, + { + "type": "text", + "bbox": [ + 0.11, + 0.442, + 0.888, + 0.52 + ], + "angle": 0, + "content": "tasks such as predicting clinical trial approval are more challenging to reason over. If TxGemma-27B-Chat is prompted to provide reasoning in the same manner as in Figure 6 for predicting clinical trial approval, TxGemma-27B-Chat refuses and directs the user to alternative sources. However, when modifying the original prompt, instructing the model to output reasoning steps before the final answer, it bypasses the refusal and restores reasoning capabilities (Figure S.10)." + }, + { + "type": "title", + "bbox": [ + 0.111, + 0.531, + 0.625, + 0.547 + ], + "angle": 0, + "content": "3.3 Agentic Planning and Execution based on TxGemma" + }, + { + "type": "text", + "bbox": [ + 0.109, + 0.557, + 0.888, + 0.71 + ], + "angle": 0, + "content": "Agentic-Tx demonstrates competitive performance on therapeutic benchmarks. We evaluate the capability of Agentic-Tx to assist with therapeutics tasks by means of questions from three benchmarks: GPQA (Diamond) [27], ChemBench [1], and Humanity's Last Exam (HLE) [15]. Within each benchmark, we use existing selections of therapeutic-relevant questions; for GPQA we evaluate GPQA-Chemistry (47 questions), for ChemBench we evaluate ChemBench-Chemical Preference which aims to select an ideal candidate molecule for therapeutic development (1,001 question) and ChemBench-mini, which evaluates across 8 categories of chemistry from toxicity/safety to organic chemistry (236 questions). Finally, for HLE, we evaluate HLE-Chemistry and HLE-Biology (235 questions). For open-ended questions in HLE, we observed a high variation of metric scores depending on the selection of the LLM-rater model [15]. 
To ensure an objective accuracy measure, we restrict the evaluation to multiple choice questions (MCQs)." + }, + { + "type": "text", + "bbox": [ + 0.11, + 0.714, + 0.888, + 0.913 + ], + "angle": 0, + "content": "As shown in Table 3, Agentic-Tx (Gemini 2.5-Pro), Agentic-Tx (Gemini 2.0-Pro), and Agentic-Tx (Gemini 1.5-Pro) achieve competitive or greater accuracy compared to existing SOTA models across several benchmarks. Specifically, Agentic-Tx (Gemini 2.5-Pro) and Agentic-Tx (Gemini 2.0-Pro) surpasses prior SOTA models on the exceptionally difficult Humanity's Last Exam benchmark (Chemistry & Biology tasks), with Agentic-Tx (Gemini 2.5-Pro) achieving \\(52.3\\%\\) relative improvement over o3-mini (high) and \\(13.4\\%\\) over Gemini 2.5-pro, as well as on ChemBench, with relative improvements of \\(6.3\\%\\) (ChemBench-Preference) and \\(2.4\\%\\) (ChemBench-Mini) over o3-mini (high) and \\(1.1\\%\\) (ChemBench-Preference) and \\(2.0\\%\\) (ChemBench-Mini) over Gemini 2.5-pro. On GPQA (Diamond), Agentic-Tx also achieves SOTA accuracy with \\(26.7\\%\\) relative improvements over o3-mini and \\(2.7\\%\\) over Gemini 2.5-pro. All variants of Agentic-Tx outperform their corresponding base Gemini models across all benchmarks, indicating the effectiveness of the Agentic-Tx framework in enhancing LLMs efficacy for advanced reasoning within this domain. This suggests that agentic workflows such as ours represent useful tools for therapeutic development, particularly in areas requiring domain knowledge and the selection of candidate molecules. The agent's ability to leverage external tools and perform multi-step" + }, + { + "type": "page_number", + "bbox": [ + 0.863, + 0.938, + 0.884, + 0.95 + ], + "angle": 0, + "content": "11" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.139, + 0.097, + 0.5, + 0.109 + ], + "angle": 0, + "content": "Instructions: Answer the following question about drug properties." 
+ }, + { + "type": "text", + "bbox": [ + 0.139, + 0.111, + 0.732, + 0.145 + ], + "angle": 0, + "content": "Context: As a membrane separating circulating blood and brain extracellular fluid, the blood-brain barrier (BBB) is the protection layer that blocks most foreign drugs. Thus the ability of a drug to penetrate the barrier to deliver to the site of action forms a crucial challenge in development of drugs for central nervous system." + }, + { + "type": "text", + "bbox": [ + 0.14, + 0.147, + 0.443, + 0.158 + ], + "angle": 0, + "content": "Question: Given a drug SMILES string, predict whether it" + }, + { + "type": "text", + "bbox": [ + 0.14, + 0.158, + 0.393, + 0.169 + ], + "angle": 0, + "content": "(A) does not cross the BBB (B) crosses the BBB" + }, + { + "type": "text", + "bbox": [ + 0.14, + 0.173, + 0.612, + 0.185 + ], + "angle": 0, + "content": "Drug SMILES: C1CN(CCC1(C=O)NC(CCO)C2=CC=C(C=C2)Cl)N)C3=NC=NC4=C3C=CN44" + }, + { + "type": "title", + "bbox": [ + 0.264, + 0.209, + 0.327, + 0.222 + ], + "angle": 0, + "content": "TxGemma" + }, + { + "type": "text", + "bbox": [ + 0.265, + 0.226, + 0.331, + 0.238 + ], + "angle": 0, + "content": "Answer: (B)" + }, + { + "type": "text", + "bbox": [ + 0.139, + 0.257, + 0.441, + 0.269 + ], + "angle": 0, + "content": "Explain your reasoning based on the molecule structure." + }, + { + "type": "title", + "bbox": [ + 0.264, + 0.284, + 0.327, + 0.297 + ], + "angle": 0, + "content": "TxGemma" + }, + { + "type": "text", + "bbox": [ + 0.266, + 0.305, + 0.757, + 0.317 + ], + "angle": 0, + "content": "The molecule has several properties that suggest it can cross the blood-brain barrier (BBB):" + }, + { + "type": "text", + "bbox": [ + 0.265, + 0.327, + 0.859, + 0.35 + ], + "angle": 0, + "content": "Lipophilicity: The molecule contains multiple aromatic rings and a long alkyl chain, contributing to its lipophilicity. Lipophilicity is a key factor in BBB penetration, as the barrier is primarily composed of lipids." 
+ }, + { + "type": "text", + "bbox": [ + 0.265, + 0.353, + 0.857, + 0.376 + ], + "angle": 0, + "content": "Molecular weight: The molecular weight is within a range (around 400 Da) that is generally considered favorable for BBB permeability." + }, + { + "type": "text", + "bbox": [ + 0.265, + 0.379, + 0.857, + 0.402 + ], + "angle": 0, + "content": "Hydrogen bond donors and acceptors: While there are hydrogen bond donors and acceptors present, they are not excessive, which can sometimes hinder BBB passage." + }, + { + "type": "text", + "bbox": [ + 0.265, + 0.404, + 0.857, + 0.428 + ], + "angle": 0, + "content": "Lack of charged groups: The molecule lacks any charged groups at physiological pH, making it less likely to be repelled by the BBB's tight junctions." + }, + { + "type": "text", + "bbox": [ + 0.265, + 0.43, + 0.857, + 0.453 + ], + "angle": 0, + "content": "Overall, the combination of lipophilicity, moderate molecular weight, balanced hydrogen bonding capacity, and lack of charged groups suggests that this molecule has a good chance of crossing the blood-brain barrier." + }, + { + "type": "text", + "bbox": [ + 0.138, + 0.475, + 0.713, + 0.487 + ], + "angle": 0, + "content": "What makes passing through the blood-brain barrier different from passive diffusion through a membrane?" + }, + { + "type": "title", + "bbox": [ + 0.264, + 0.503, + 0.327, + 0.515 + ], + "angle": 0, + "content": "TxGemma" + }, + { + "type": "text", + "bbox": [ + 0.266, + 0.528, + 0.857, + 0.551 + ], + "angle": 0, + "content": "While passive diffusion is a major mechanism for molecules to cross the blood-brain barrier (BBB), there are key differences that make BBB passage more challenging and selective:" + }, + { + "type": "text", + "bbox": [ + 0.272, + 0.553, + 0.857, + 0.586 + ], + "angle": 0, + "content": "1. Tight Junctions: The cells forming the BBB are packed very tightly together with specialized structures called tight junctions. 
These junctions seal the spaces between cells, significantly reducing the gaps through which molecules can passively diffuse." + }, + { + "type": "text", + "bbox": [ + 0.272, + 0.586, + 0.856, + 0.619 + ], + "angle": 0, + "content": "2. Specialized Transport Systems: The BBB has a variety of specialized transport systems that actively move specific molecules across the barrier. These systems are highly selective and often require molecules to have specific structural features or be recognized by specific receptors." + }, + { + "type": "text", + "bbox": [ + 0.272, + 0.619, + 0.856, + 0.651 + ], + "angle": 0, + "content": "3. Efflux Pumps: The BBB also expresses efflux pumps, which are proteins that actively pump molecules back out of the brain. This mechanism helps to prevent potentially harmful substances from entering the brain and maintain a stable brain environment." + }, + { + "type": "text", + "bbox": [ + 0.272, + 0.652, + 0.856, + 0.673 + ], + "angle": 0, + "content": "4. Metabolic Enzymes: The BBB contains enzymes that can metabolize and inactivate certain molecules, further limiting their entry into the brain." + }, + { + "type": "text", + "bbox": [ + 0.272, + 0.673, + 0.856, + 0.705 + ], + "angle": 0, + "content": "5. Astrocyte Endfeet: Astrocytes, a type of glial cell, have projections called endfeet that surround blood vessels in the brain. These endfeet contribute to the BBB's barrier function and regulate the passage of molecules." + }, + { + "type": "list", + "bbox": [ + 0.272, + 0.553, + 0.857, + 0.705 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.266, + 0.709, + 0.857, + 0.765 + ], + "angle": 0, + "content": "In summary, while passive diffusion plays a role in BBB permeability, the presence of tight junctions, specialized transport systems, efflux pumps, metabolic enzymes, and astrocyte endfeet make the BBB a highly selective barrier that restricts the entry of many molecules. 
This selectivity is crucial for protecting the brain from harmful substances and maintaining a stable internal environment, but it also poses challenges for drug delivery to the central nervous system." + }, + { + "type": "text", + "bbox": [ + 0.145, + 0.787, + 0.391, + 0.799 + ], + "angle": 0, + "content": "Is my molecule likely to run into these issues?" + }, + { + "type": "title", + "bbox": [ + 0.264, + 0.814, + 0.327, + 0.827 + ], + "angle": 0, + "content": "TxGemma" + }, + { + "type": "text", + "bbox": [ + 0.265, + 0.831, + 0.751, + 0.843 + ], + "angle": 0, + "content": "Let's analyze how your molecule might be affected by the specific challenges of the BBB ..." + }, + { + "type": "image_caption", + "bbox": [ + 0.11, + 0.863, + 0.887, + 0.923 + ], + "angle": 0, + "content": "Figure 6 | TxGemma-Chat can be prompted to explain its reasoning or provide additional context. Following an initial question, TxGemma-27B-Chat is asked to explain its reasoning based on molecule structure in the second turn. The model uses its understanding of chemistry and biology to justify its answer and can continually engage with the user on follow-up questions." + }, + { + "type": "page_number", + "bbox": [ + 0.863, + 0.938, + 0.885, + 0.949 + ], + "angle": 0, + "content": "12" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.111, + 0.091, + 0.772, + 0.106 + ], + "angle": 0, + "content": "reasoning enables it to address more complex queries beyond the scope of traditional LLMs." + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.111, + 0.888, + 0.34 + ], + "angle": 0, + "content": "Agentic-Tx effectively leverages various tools based on the therapeutic task requirement. In Figure S.14, we investigate tool usage frequency within the Agentic-Tx system across the ChemBench-Preference and Biology and Chemistry (B&C) HLE datasets. Our analysis reveals that Agentic-Tx tool usage distribution varies significantly depending on the task and dataset. 
For the ChemBench-Preference task, which focuses on selecting ideal candidate molecules for therapeutic development, the Agentic-Tx system exhibits a high frequency of usage for tools such as SMILES description and toxicity prediction. This suggests a strong emphasis on molecular characterization and safety assessment in this task correctly invoked by Agentic-Tx. In contrast, on the B&C HLE dataset, tool usage is predominantly concentrated on general knowledge retrieval tools like PubMed or Wikipedia search. This indicates that the Agentic-Tx system relies heavily on accessing and synthesizing broad biological or chemical knowledge to address questions in these domains. In Figure S.15, we investigate the breakdown of tool interactions per question and explore how these interactions contribute to performance variations. Our analysis shows that each question can involve up to 8 tool calls, and the high usage of tools such as SMILES description and toxicity prediction tools correlates with overall performance improvement. These results highlight the Agentic-Tx system's adaptive nature, demonstrating its ability to leverage different tools based on the specific requirements of the task." + }, + { + "type": "text", + "bbox": [ + 0.11, + 0.351, + 0.887, + 0.457 + ], + "angle": 0, + "content": "Agentic-Tx inference time is suitable for real time human interaction Analysis of Agentic-Tx's inference time indicates efficient performance characteristics. The median time observed for tool execution is 0.55 seconds. The fastest tool (Gene Sequence) completes execution in 0.15 seconds, while the slowest (ToxCast) requires 28.2 seconds. This suggests that Agentic-Tx operates within a timeframe conducive to real-time user interaction. The observed latencies demonstrate suitability for integration into workflows where immediate feedback and responsiveness are desired. 
The system's ability to maintain a median inference time below one second contributes to an efficient user experience." + }, + { + "type": "title", + "bbox": [ + 0.111, + 0.47, + 0.457, + 0.485 + ], + "angle": 0, + "content": "3.4 Additional Analysis and Ablations" + }, + { + "type": "text", + "bbox": [ + 0.11, + 0.497, + 0.886, + 0.603 + ], + "angle": 0, + "content": "Data contamination analysis and data leakage considerations To assess potential data contamination from the Gemma-2 pretraining data, we calculated the overlap between features in the therapeutic instruction-tuning data and the pretraining corpus. For multi-instance tasks, contamination was defined as the presence of any constituent feature (e.g., drug SMILES or target protein sequence in drug-target binding) in the pretraining data. The majority of tasks showed no direct contamination (Figure S.12). For tasks with some contamination, filtering contaminated datapoints and recalculating TxGemma-27B-Predict performance revealed no significant changes (Figure S.13)." + }, + { + "type": "text", + "bbox": [ + 0.11, + 0.608, + 0.886, + 0.761 + ], + "angle": 0, + "content": "While direct contamination was minimal, we further investigated potential indirect contamination. Although SMILES strings are less common in general web text, pretraining on molecular names could have created learned associations between names and SMILES, potentially influencing test set performance. To test this, we compared the similarity of TxGemma-27B-Predict embeddings for PubChem molecules represented as SMILES strings and their corresponding IUPAC names, against the similarity of embeddings for SMILES strings paired with decoy (randomly selected, incorrect) names. The similarities were statistically equivalent (Figure S.12), confirmed by a two one-sided t-test \\((p = 3 \\times 10^{-12}\\), \\(\\delta = 0.02)\\). 
This suggests that TxGemma-27B-Predict did not learn spurious name-SMILES associations during pretraining, likely because names and SMILES were encountered in separate training phases and for different molecules. Therefore, both direct and indirect contamination from pretraining are unlikely to significantly affect our results." + }, + { + "type": "text", + "bbox": [ + 0.11, + 0.766, + 0.886, + 0.904 + ], + "angle": 0, + "content": "Fine-tuning TxGemma models improves data efficiency. Given the scarcity of therapeutic data and the potential of TxGemma to serve as a pretrained model for further adaptation, we investigated TxGemma's data efficiency and generalization to new tasks in out-of-distribution settings. Specifically, we fine-tuned the baseline model Gemma-2-27B as well as our TxGemma-27B-Predict on adverse event prediction data from TrialBench [29]. Serious adverse events are critical in assessing the safety profile of a new treatment and accurate prediction of these events allows for better risk management and resource allocation [29]. To ensure a fair evaluation of generalization, we filtered the TrialBench test set to exclude samples overlapping with phase 1, 2, or 3 of clinical trial outcome prediction data in TDC. In addition, datapoints without available SMILES strings are excluded. This lead to 14,368 train and 3,184 test samples." 
+ }, + { + "type": "page_number", + "bbox": [ + 0.863, + 0.938, + 0.885, + 0.95 + ], + "angle": 0, + "content": "13" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.118, + 0.09, + 0.315, + 0.258 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.319, + 0.091, + 0.501, + 0.258 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.508, + 0.092, + 0.691, + 0.257 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.697, + 0.092, + 0.882, + 0.258 + ], + "angle": 0, + "content": null + }, + { + "type": "image_footnote", + "bbox": [ + 0.16, + 0.269, + 0.87, + 0.284 + ], + "angle": 0, + "content": "Gemma-27B (S) \\(\\rightarrow\\) TxGemma-27B-Predict (S) --- Gemma-27B (S+T) —— TxGemma-27B-Predict (S+T) --- Best-in-class (S+T)" + }, + { + "type": "image_caption", + "bbox": [ + 0.11, + 0.305, + 0.889, + 0.412 + ], + "angle": 0, + "content": "Figure 7 | TxGemma improves efficiency at adverse event prediction from SMILES strings. The figure shows the AUROC of predicting adverse events in a clinical trial from the drug SMILES strings as a function of the training data fraction for Gemma-2-27B and TxGemma-27B-Predict. Clinical trials are separated based on trial phase, and datapoints without available SMILES strings are excluded. To assess model performance with additional textual information, separate models trained on both SMILES strings and additional textual information are indicated by colored dashed lines, and SOTA models are indicated by gray dashed lines. (S) denotes models trained with SMILES strings only, and \\((\\mathrm{S} + \\mathrm{T})\\) those trained with SMILES and textual information (Table S.10)." + }, + { + "type": "text", + "bbox": [ + 0.109, + 0.445, + 0.889, + 0.629 + ], + "angle": 0, + "content": "We consider two settings. 
Initially, we focus exclusively on drug SMILES strings as the only feature contributing to clinical trial outcome, thereby isolating the influence of therapeutic information by excluding this additional context. To simulate data limitations, we fine-tuned TxGemma-27B-Predict and the baseline Gemma-2-27B on varying fractions of the training data, and then evaluated the newly fine-tuned models performance on the test set after 30 epochs of training (Figure 7). Overall, TxGemma-27B-Predict achieved higher AUROCs with lower amounts of training data, matching the performance of Gemma-2-27B with less than \\(10\\%\\) of retraining data. In the second setting, we explored the performance ceiling by incorporating textual information about the clinical trials, increasing the number of tokens provided to the models by a factor of 4 (Table S.10). This is the setting used by the best-in-class model for adverse event prediction [29]. The addition of textual information allowed our models to consistently outperform existing SOTA methods [29]. However, the performance difference between TxGemma-27B-Predict and Gemma-2-27B decreased in this scenario because the additional textual information diluted the relative importance of the drug SMILES strings." + }, + { + "type": "text", + "bbox": [ + 0.109, + 0.633, + 0.888, + 0.71 + ], + "angle": 0, + "content": "TxGemma inference time is suitable for virtual screening In Figure S.11, we plot the inference speed of TxGemma models of all sizes normalized by the number of TPUv5e chips used for serving. All model sizes are suitably fast for virtual screening, as even the largest 27B model is able to inference around 9,000 samples per day per TPU chip. Using 64 chips for serving, this would yield around 600,000 samples per day for the 27B model, and the smallest 2B model would reach 3,000,000 samples per day." 
+ }, + { + "type": "text", + "bbox": [ + 0.109, + 0.715, + 0.888, + 0.792 + ], + "angle": 0, + "content": "Correlation between clinical trial approval and toxicity predictions We investigated the correlation between TxGemma's clinical trial approval predictions (based on SMILES and target disease) and its toxicity predictions (using TDC's AMES, DILI, and hERG tasks). Figure S.18 shows a consistent, but weak (0.15-0.35), positive Spearman correlation across all phases. This suggests TxGemma associates lower predicted toxicity with approval, but may also consider other factors such as efficacy or drug-likeness." + }, + { + "type": "text", + "bbox": [ + 0.109, + 0.796, + 0.888, + 0.874 + ], + "angle": 0, + "content": "Impact of feature types Figure S.16 presents a performance breakdown of TxGemma-27B-Predict by feature type, compared to Tx-LLM M. In both models, tasks incorporating both SMILES strings and textual features (e.g., disease names, cell line names/description) show the most significant improvement over SOTA. This suggests that the contextual knowledge acquired during LLM pretraining could aid in synthesizing textual information with molecular representations." + }, + { + "type": "text", + "bbox": [ + 0.11, + 0.878, + 0.888, + 0.91 + ], + "angle": 0, + "content": "Model size and domain fine-tuning ablations Figure S.17 compares the performance of TxGemma-Predict models across different sizes (2B, 9B, and 27B) on TDC tasks. Pairwise comparisons using the Wilcoxon" + }, + { + "type": "page_number", + "bbox": [ + 0.863, + 0.938, + 0.886, + 0.95 + ], + "angle": 0, + "content": "14" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.11, + 0.09, + 0.888, + 0.184 + ], + "angle": 0, + "content": "signed-rank test indicate that model size is a significant factor: TxGemma-27B-Predict outperforms TxGemma-9B-Predict \\((p = 0.013)\\) and TxGemma-2B-Predict \\((p = 6.2 \\times 10^{-6})\\), and TxGemma-9B-Predict outperforms TxGemma-2B-Predict \\((p = 0.048)\\). 
Furthermore, comparing TxGemma models to their corresponding base Gemma-2 models reveals the significant impact of domain fine-tuning. All TxGemma models significantly outperform their Gemma-2 counterparts \\((p < 10^{-10}\\), Wilcoxon signed-rank test), underscoring the importance of specialized training for therapeutic tasks." + }, + { + "type": "title", + "bbox": [ + 0.111, + 0.2, + 0.268, + 0.216 + ], + "angle": 0, + "content": "4 Related work" + }, + { + "type": "text", + "bbox": [ + 0.11, + 0.229, + 0.888, + 0.322 + ], + "angle": 0, + "content": "Task-specific models for chemistry and therapeutics. In recent years, there has been a surge in the development of deep learning models designed for various chemistry applications. Amongst those, graph neural networks (GNNs) have been applied for a wide variety of molecular prediction or generation tasks because small molecules are naturally represented as graphs [30, 31, 32, 33, 34, 35, 36, 37, 24]. Another common representation for small molecules is molecular fingerprints [38], which are binary vectors that capture the local environment of each atom [30, 39, 40]." + }, + { + "type": "text", + "bbox": [ + 0.11, + 0.326, + 0.887, + 0.389 + ], + "angle": 0, + "content": "TxGNN trained a GNN on medical knowledge graphs in order to perform zero-shot drug repurposing for diseases with limited treatment options [41]. AlphaFold and its successors have also significantly advanced the field of protein structure prediction and protein design [42, 43, 44, 45, 46]. These models have been influential for both mechanistic research and the development of structure-based drugs [47]." + }, + { + "type": "text", + "bbox": [ + 0.11, + 0.393, + 0.889, + 0.471 + ], + "angle": 0, + "content": "Large language models for biology and chemistry. Transformer-based models [48] have fueled the development of LLMs, which are trained on massive textual datasets with subsequent instruction-tuning [49] or alignment [50]. 
LLMs have demonstrated exceptional proficiency in various tasks, including text summarization, translation, and question answering [16, 51, 52]. Their ability to encode vast amounts of information and generalize to new tasks has sparked considerable interest in their potential applications across diverse domains." + }, + { + "type": "text", + "bbox": [ + 0.11, + 0.474, + 0.888, + 0.643 + ], + "angle": 0, + "content": "There has been increasing interest in applying the development for LLMs to scientific research. BrainGPT fine-tuned LLMs on neuroscience literature and found greater performance than domain experts [53]. LlaSMol fine-tuned LLMs on small molecule datasets and achieved near-SOTA performance on multiple tasks [23]. CLAMP used separate modules for natural language and molecular inputs, combining them together in a contrastive pre-training objective [54]. Protein language models [55, 56, 57, 58] and genomic language models [59, 60, 61] have used self-supervised pretraining to generate embeddings useful for downstream tasks. ProtLLM [62], BioT5 [63], and GraphToken [64] combine molecule or proteins with LLMs using textual or multi-modal strategies. Cellular foundation models such as scGPT [65], GenePT [66], Geneformer [67], Nicheformer [68], and Cell2Sentence [69] represent cells based on their gene expression to differentiate cell types and understand gene perturbations. NatureLM [70] trained a foundation model that represents small molecules, proteins, RNA, and materials as sequences over a wide variety of scientific tasks." + }, + { + "type": "text", + "bbox": [ + 0.11, + 0.647, + 0.888, + 0.877 + ], + "angle": 0, + "content": "Agentic Systems. Unlike traditional passive models, agentic systems proactively choose actions to achieve goals [71, 72, 73, 74, 75], involving planning [76, 77, 78, 79, 80] and interaction with external tools [81, 82, 83, 84]. 
LLMs have enabled such systems by processing complex information and generating action-driving responses. The ReAct framework [22] combines reasoning, action, and observation, with variations incorporating self-reflection [85] or model architectures for internal tool usage [82]. Agentic frameworks enable automating tasks like software development [73, 86, 87, 88] and scientific research [89, 90, 91] including biomedical applications such as nanobody design [90], drug discovery [92], or reaction optimization [93]. ChemCrow [92] is an agent designed to perform chemistry experiments in drug discovery and materials design. The coscientist by Boiko et al. [93] designs and performs chemical experiments by integrating web knowledge, code execution, and experiment automation, demonstrating successful reaction optimization of palladium-catalysed cross-couplings. The multi-agent system AI co-scientist [88] is designed for hypothesis generation over a variety of scientific fields. TxAgent was developed as an agentic framework that provides multi-step reasoning and tool use aimed towards therapeutic applications, processing clinical information to support tasks like treatment recommendation [94]. In contrast to recommending existing therapeutics, Agentic-Tx generally focuses on developing new therapeutics." + }, + { + "type": "page_number", + "bbox": [ + 0.863, + 0.938, + 0.885, + 0.95 + ], + "angle": 0, + "content": "15" + } + ], + [ + { + "type": "title", + "bbox": [ + 0.114, + 0.09, + 0.239, + 0.107 + ], + "angle": 0, + "content": "5 Discussion" + }, + { + "type": "text", + "bbox": [ + 0.114, + 0.12, + 0.885, + 0.241 + ], + "angle": 0, + "content": "TxGemma's performance suggests a paradigm shift in therapeutic AI development, demonstrating the viability of generalist LLMs. Despite the established dominance of specialist models in niche areas, TxGemma, a relatively lightweight and efficient generalist, achieves competitive results across a wide array of therapeutic tasks. 
This highlights the potential for broadly trained LLMs, such as those leveraging the comprehensive dataset Therapeutics Data Commons (TDC), to serve as powerful preliminary tools for hypothesis generation, information synthesis, and candidate prioritization. While specialist models would likely retain their value for complex, domain-specific challenges, future research should explore synergistic approaches that combine the strengths of both generalist and specialist therapeutic AI." + }, + { + "type": "text", + "bbox": [ + 0.114, + 0.247, + 0.885, + 0.414 + ], + "angle": 0, + "content": "A significant advancement with TxGemma-Chat is its ability to provide reasoning for its predictions, a first in therapeutic AI and a feature lost in TxGemma-Predict, likely due to \"catastrophic forgetting\" [95]. While explainability may introduce a small trade-off in raw predictive power, it provides a crucial window into the model's decision-making, a factor of paramount importance in therapeutic development. For instance, explaining blood-brain barrier permeability based on molecular structure provides valuable insights for medicinal chemists. Beyond its research applications, TxGemma-Chat holds a significant educational potential, enabling students and researchers to explore complex therapeutic concepts. At the same time, it is important to acknowledge that provided explanations are correlations, not necessarily causal, and must be interpreted with caution. The model's occasional inability to explain certain predictions reveals its knowledge boundaries. Future research should prioritize improving reliability and comprehensive explanations. Even with current limitations, TxGemma-Chat represents an important improvement over the \"black-box\" paradigm." + }, + { + "type": "text", + "bbox": [ + 0.114, + 0.42, + 0.885, + 0.557 + ], + "angle": 0, + "content": "Expanding beyond single-step predictions, Agentic-Tx demonstrates the potential for LLMs to orchestrate complex workflows. 
By integrating TxGemma with a suite of external tools (PubMed, Wikipedia, chemical databases, etc), Agentic-Tx can tackle multi-step reasoning tasks that would be difficult for a standalone LLM. Its strong performance on benchmarks like ChemBench Chemical Preference and Humanity's Last Exam (HLE) highlights the synergistic value of integrating domain-specific knowledge from TxGemma with general reasoning and information retrieval. This modular, tool-based design further ensures flexibility and extensibility, allowing for future integration of new tools and data. Importantly, it solves the issue of knowledge cut-off in LLMs by providing access to up-to-date information. Agentic-Tx with its autonomous and collaborative operation is a powerful asset for augmenting researchers and advancing therapeutic development." + }, + { + "type": "text", + "bbox": [ + 0.114, + 0.562, + 0.885, + 0.653 + ], + "angle": 0, + "content": "The data efficiency of TxGemma is clearly demonstrated in fine-tuning experiments on TrialBench. It achieves robust performance on novel tasks with substantially less training data compared to baseline models, showcasing the benefits of pre-training on a broad and diverse dataset like TDC. This efficiency is particularly critical in therapeutic domains, where data is often proprietary and limited. Moreover, our finding that adding textual context, while improving overall results, can dilute the influence of molecular representations emphasizes the importance of balancing the benefits of additional information with strategic feature selection." + }, + { + "type": "text", + "bbox": [ + 0.114, + 0.659, + 0.885, + 0.781 + ], + "angle": 0, + "content": "Although our in-silico results across a diverse range of therapeutic tasks are highly encouraging, we acknowledge that TxGemma's performance has not yet been validated in real-world, wet-lab experiments. Prospective validation in these settings represents a crucial next step. 
However, a cornerstone of this work is our commitment to open model release. By making TxGemma readily accessible to the research community, we aim to facilitate its rigorous validation and adaptation. Researchers can tailor TxGemma to their specific datasets, encompassing tasks and distribution shifts beyond the scope of TDC. Given the predominantly proprietary nature of therapeutic data, we believe this collaborative, community-driven approach is essential for translating TxGemma into tangible therapeutic applications" + }, + { + "type": "title", + "bbox": [ + 0.114, + 0.799, + 0.242, + 0.815 + ], + "angle": 0, + "content": "6 Conclusion" + }, + { + "type": "text", + "bbox": [ + 0.114, + 0.829, + 0.885, + 0.905 + ], + "angle": 0, + "content": "In conclusion, this work introduced TxGemma, a suite of efficient, generalist LLMs designed to improve therapeutic development. By leveraging extensive therapeutic instruction-tuning datasets and building upon the foundation of Gemma-2, TxGemma achieves exceptional performance across a wide range of predictive and generative therapeutic tasks, surpassing or matching both generalist and specialist state-of-the-art models. Notably, TxGemma's conversational counterparts, a first in therapeutic AI, provide reasoning and explanations," + }, + { + "type": "page_number", + "bbox": [ + 0.864, + 0.938, + 0.884, + 0.949 + ], + "angle": 0, + "content": "16" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.11, + 0.091, + 0.887, + 0.198 + ], + "angle": 0, + "content": "moving beyond traditional black-box predictions to facilitate mechanistic understanding and scientific discourse. Furthermore, the integration of TxGemma into an agentic system, Agentic-Tx, demonstrates its capacity to solve complex, multi-step problems, achieving state-of-the-art results on challenging reasoning-intensive tasks. 
Finally, and critically, the open release of TxGemma empowers the research community and scientist to adapt and refine the models on their own private data, potentially leading to significant advancements in drug discovery and development. Through these contributions, TxGemma represents a meaningful step towards more efficient, transparent, and collaborative AI-driven therapeutic research." + }, + { + "type": "title", + "bbox": [ + 0.111, + 0.228, + 0.265, + 0.243 + ], + "angle": 0, + "content": "Acknowledgments" + }, + { + "type": "text", + "bbox": [ + 0.11, + 0.253, + 0.888, + 0.406 + ], + "angle": 0, + "content": "This project was a collaboration between teams at Google DeepMind and Google Research. We thank Marcus Brubaker, David Belanger, Justin Chen, and David Steiner for the feedback and insight which significantly contributed to the enhancement of this report. We thank Tris Warkentin, Glenn Cameron, Victor Cotruta, Fereshteh Mahvar, Tiffany Chen, Omar Sansevier, Kathleen Kenealy, Joe Fernandez, Gus Martins, Nabila Babar, Sara Smoot, Antonia Paterson, Pankil Botadra, Metin Toksoz-Exley, Tim Thelin, Can \"John\" Kirmizi, and Fayaz Jamil for their collaborative efforts in enabling the open model launch of TxGemma. We also thank Phoebe Kirk, Rachelle Sico, Yun Liu, Anand Rao, Jon Small, Juanita Bawagan, Jane Park, Jenn Sturgeon, Fred Alcober, Samantha Heyman, Abhinav Das for their valuable insights and technical support. We are also grateful to Zoubin Ghahramani, Raia Hadsell, Avinatan Hassidim, Katherine Chou, Dale Webster, Jon Shlens, and Pushmeet Kohli for their support during the course of this project." + }, + { + "type": "title", + "bbox": [ + 0.111, + 0.417, + 0.281, + 0.431 + ], + "angle": 0, + "content": "Inclusion and ethics" + }, + { + "type": "text", + "bbox": [ + 0.11, + 0.443, + 0.887, + 0.52 + ], + "angle": 0, + "content": "While AI offers transformative potential in drug discovery, ethical considerations and transparency remain crucial. 
Biases in training data can lead to inequities, highlighting the need for diverse datasets and explainable AI systems. Our model, while still in the research stage, highlights the continuous need for development and refinement in this field. We acknowledge the difficulty in explaining the inner workings of complex models, but remain dedicated to advancing research in this area." + }, + { + "type": "title", + "bbox": [ + 0.111, + 0.531, + 0.253, + 0.546 + ], + "angle": 0, + "content": "Data availability" + }, + { + "type": "text", + "bbox": [ + 0.11, + 0.556, + 0.887, + 0.618 + ], + "angle": 0, + "content": "The Therapeutics Data Commons (TDC) datasets used for developing, benchmarking, and evaluating TxGemma are publicly available on their website. The benchmarking datasets used in this study—Humanity's Last Exam (HLE), GPQA (Diamond), ChemBench, and TrialBench (Serious Adverse Event Prediction)—are all publicly available via their respective websites." + }, + { + "type": "title", + "bbox": [ + 0.111, + 0.629, + 0.255, + 0.645 + ], + "angle": 0, + "content": "Code availability" + }, + { + "type": "text", + "bbox": [ + 0.11, + 0.655, + 0.888, + 0.807 + ], + "angle": 0, + "content": "All of the components used in this work are available publicly. For reproducibility, we have documented technical methods and data curation detail in depth, while keeping the paper accessible to clinical and general scientific audiences. Specifically, all the data needs to reproduce this work is publicly accessible to the community. TxGemma, a collection of lightweight state-of-the-art, open language models, are provided for researchers in three model size of 2B, 9B, and 27B and is accessible through Vertex AI Model Garden and Hugging Face. TxGemma's Github repository including supporting code and colab notebooks for quick start are also available at: https://github.com/google-gemini/gemma-cookbook/tree/main/TxGemma. 
We have specifically provided starter colabs for inference, fine-tuning, and exploring agentic capabilities. TxGemma remains a research model and requires refinement. We look forward to working with research partners, regulators, and providers to validate and explore safe onward uses of TxGemma." + }, + { + "type": "title", + "bbox": [ + 0.111, + 0.818, + 0.295, + 0.833 + ], + "angle": 0, + "content": "Author Contributions" + }, + { + "type": "text", + "bbox": [ + 0.11, + 0.844, + 0.887, + 0.906 + ], + "angle": 0, + "content": "E.W., S.S., and S.A. made substantial contributions to the conception, design, and evaluation of this work. They played a key role in data analysis, interpretation of results, and the drafting and revision of the manuscript. P.F.J. contributed to drafting and revision of the manuscript. F.Z. contributed to the data processing and model training in the manuscript. R.P. contributed to obtaining necessary legal approvals," + }, + { + "type": "page_number", + "bbox": [ + 0.863, + 0.938, + 0.886, + 0.95 + ], + "angle": 0, + "content": "17" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.111, + 0.091, + 0.887, + 0.123 + ], + "angle": 0, + "content": "and organizational support. All authors participated in critically reviewing and revising the manuscript and interpreting the data and findings." + }, + { + "type": "title", + "bbox": [ + 0.111, + 0.133, + 0.285, + 0.149 + ], + "angle": 0, + "content": "Competing interests" + }, + { + "type": "text", + "bbox": [ + 0.11, + 0.158, + 0.888, + 0.205 + ], + "angle": 0, + "content": "This study was funded by Alphabet Inc and/or a subsidiary thereof ('Alphabet'). E.W., S.S., P.F.J., F.Z., R.P., Y.M., J.B., D.F., and S.A. are employees of Alphabet and may own stock as part of the standard compensation package." 
+ }, + { + "type": "page_number", + "bbox": [ + 0.863, + 0.938, + 0.884, + 0.95 + ], + "angle": 0, + "content": "18" + } + ], + [ + { + "type": "title", + "bbox": [ + 0.122, + 0.091, + 0.23, + 0.106 + ], + "angle": 0, + "content": "References" + }, + { + "type": "ref_text", + "bbox": [ + 0.119, + 0.121, + 0.887, + 0.145 + ], + "angle": 0, + "content": "1. Mirza, A., Alampara, N., Kunchapu, S., Rios-Garcia, M., Emoekabu, B., Krishnan, A., Gupta, T., Schilling-Wilhelmi, M., Okereke, M., Aneesh, A., et al. Are large language models superhuman chemists? arXiv preprint arXiv:2404.01475 (2024)." + }, + { + "type": "ref_text", + "bbox": [ + 0.119, + 0.147, + 0.887, + 0.172 + ], + "angle": 0, + "content": "2. OpenAI. Learning to Reason with LLMs https://openai.com/index/learning-to-reason-with-llms/. Accessed: Wednesday 9th April, 2025. 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.119, + 0.174, + 0.887, + 0.198 + ], + "angle": 0, + "content": "3. Sun, D., Gao, W., Hu, H. & Zhou, S. Why \\(90\\%\\) of clinical drug development fails and how to improve it? Acta Pharmaceutica Sinica B 12, 3049-3062 (2022)." + }, + { + "type": "ref_text", + "bbox": [ + 0.119, + 0.2, + 0.887, + 0.223 + ], + "angle": 0, + "content": "4. Hinkson, I. V., Madej, B. & Stahlberg, E. A. Accelerating therapeutics for opportunities in medicine: a paradigm shift in drug discovery. Frontiers in pharmacology 11, 770 (2020)." + }, + { + "type": "ref_text", + "bbox": [ + 0.119, + 0.225, + 0.887, + 0.248 + ], + "angle": 0, + "content": "5. Kumar, A., Voet, A. & Zhang, K. Y. Fragment based drug design: from experimental to computational approaches. *Current medicinal chemistry* 19, 5128-5147 (2012)." + }, + { + "type": "ref_text", + "bbox": [ + 0.119, + 0.25, + 0.887, + 0.273 + ], + "angle": 0, + "content": "6. Velez-Arce, A., Huang, K., Li, M. M., Lin, X., Gao, W., Fu, T., Kellis, M., Pentelute, B. L. & Zitnik, M. TDC-2: Multimodal foundation for therapeutic science. bioRxiv, 2024-06 (2024)." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.119, + 0.275, + 0.887, + 0.31 + ], + "angle": 0, + "content": "7. Huang, K., Fu, T., Gao, W., Zhao, Y., Roohani, Y., Leskovec, J., Coley, C. W., Xiao, C., Sun, J. & Zitnik, M. Therapeutics data commons: Machine learning datasets and tasks for drug discovery and development. arXiv preprint arXiv:2102.09548 (2021)." + }, + { + "type": "ref_text", + "bbox": [ + 0.119, + 0.312, + 0.887, + 0.335 + ], + "angle": 0, + "content": "8. Huang, K., Fu, T., Gao, W., Zhao, Y., Roohani, Y., Leskovec, J., Coley, C. W., Xiao, C., Sun, J. & Zitnik, M. Artificial intelligence foundation for therapeutic science. Nature chemical biology 18, 1033-1036 (2022)." + }, + { + "type": "ref_text", + "bbox": [ + 0.119, + 0.337, + 0.887, + 0.36 + ], + "angle": 0, + "content": "9. Bubeck, S., Chandrasekaran, V., Eldan, R., Gehrke, J., Horvitz, E., Kamar, E., Lee, P., Lee, Y. T., Li, Y., Lundberg, S., et al. Sparks of artificial general intelligence: Early experiments with GPT-4. arXiv preprint arXiv:2303.12712 (2023)." + }, + { + "type": "ref_text", + "bbox": [ + 0.113, + 0.362, + 0.887, + 0.385 + ], + "angle": 0, + "content": "10. Taylor, R., Kardas, M., Cucurull, G., Scialom, T., Hartshorn, A., Saravia, E., Poulton, A., Kerkez, V. & Stojnic, R. Galactica: A large language model for science. arXiv preprint arXiv:2211.09085 (2022)." + }, + { + "type": "ref_text", + "bbox": [ + 0.113, + 0.387, + 0.887, + 0.411 + ], + "angle": 0, + "content": "11. Telenti, A., Auli, M., Hie, B. L., Maher, C., Saria, S. & Ioannidis, J. P. Large language models for science and medicine. European journal of clinical investigation 54, e14183 (2024)." + }, + { + "type": "ref_text", + "bbox": [ + 0.113, + 0.413, + 0.887, + 0.436 + ], + "angle": 0, + "content": "12. Chaves, J. M. Z., Wang, E., Tu, T., Vaishnav, E. D., Lee, B., Mahdavi, S. S., Semturs, C., Fleet, D., Natarajan, V. & Azizi, S. Tx-LLM: A Large Language Model for Therapeutics. 
arXiv preprint arXiv:2406.06316 (2024)." + }, + { + "type": "ref_text", + "bbox": [ + 0.113, + 0.438, + 0.887, + 0.461 + ], + "angle": 0, + "content": "13. Team, G., Mesnard, T., Hardin, C., Dadashi, R., Bhupatiraju, S., Pathak, S., Sifre, L., Riviere, M., Kale, M. S., Love, J., et al. Gemma: Open models based on gemini research and technology. arXiv preprint arXiv:2403.08295 (2024)." + }, + { + "type": "ref_text", + "bbox": [ + 0.113, + 0.463, + 0.887, + 0.487 + ], + "angle": 0, + "content": "14. Team, G., Riviere, M., Pathak, S., Sessa, P. G., Hardin, C., Bhupatiraju, S., Hussenot, L., Mesnard, T., Shahriari, B., Ramé, A., et al. Gemma 2: Improving open language models at a practical size. arXiv preprint arXiv:2408.00118 (2024)." + }, + { + "type": "ref_text", + "bbox": [ + 0.113, + 0.489, + 0.887, + 0.512 + ], + "angle": 0, + "content": "15. Phan, L., Gatti, A., Han, Z., Li, N., Hu, J., Zhang, H., Shi, S., Choi, M., Chopra, A., et al. Humanity's Last Exam. arXiv preprint arXiv:2501.14249 (2025)." + }, + { + "type": "ref_text", + "bbox": [ + 0.113, + 0.514, + 0.887, + 0.537 + ], + "angle": 0, + "content": "16. Brown, T., Mann, B., Ryder, N., Subbiah, M., Kaplan, J. D., Dhariwal, P., Neelakantan, A., Shyam, P., Sastry, G., Askell, A., et al. Language models are few-shot learners. Advances in neural information processing systems 33, 1877-1901 (2020)." + }, + { + "type": "ref_text", + "bbox": [ + 0.113, + 0.539, + 0.887, + 0.573 + ], + "angle": 0, + "content": "17. Longpre, S., Hou, L., Vu, T., Webson, A., Chung, H. W., Tay, Y., Zhou, D., Le, Q. V., Zoph, B., Wei, J., et al. The FLAN collection: Designing data and methods for effective instruction tuning in International Conference on Machine Learning (2023), 22631-22648." + }, + { + "type": "ref_text", + "bbox": [ + 0.113, + 0.575, + 0.887, + 0.599 + ], + "angle": 0, + "content": "18. Team, G., Anil, R., Borgeaud, S., Alayrac, J.-B., Yu, J., Soricut, R., Schalkwyk, J., Dai, A. M., Hauth, A., Millican, K., et al. 
Gemini: a family of highly capable multimodal models. arXiv preprint arXiv:2312.11805 (2023)." + }, + { + "type": "ref_text", + "bbox": [ + 0.113, + 0.601, + 0.887, + 0.624 + ], + "angle": 0, + "content": "19. Landrum, G. RDKit: Open-Source Cheminformatics Software. https://github.com/rdkit/rdkit/releases/tag/Release_2016_09_4 (2016)." + }, + { + "type": "ref_text", + "bbox": [ + 0.113, + 0.626, + 0.613, + 0.639 + ], + "angle": 0, + "content": "20. Dalke, A. The chemfp project. Journal of cheminformatics 11, 1-21 (2019)." + }, + { + "type": "ref_text", + "bbox": [ + 0.113, + 0.641, + 0.887, + 0.674 + ], + "angle": 0, + "content": "21. Sievers, F., Wilm, A., Dineen, D., Gibson, T. J., Karplus, K., Li, W., Lopez, R., McWilliam, H., Remmert, M., Söding, J., et al. Fast, scalable generation of high-quality protein multiple sequence alignments using Clustal Omega. Molecular systems biology 7, 539 (2011)." + }, + { + "type": "ref_text", + "bbox": [ + 0.113, + 0.676, + 0.887, + 0.699 + ], + "angle": 0, + "content": "22. Yao, S., Zhao, J., Yu, D., Du, N., Shafran, I., Narasimhan, K. & Cao, Y. React: Synergizing reasoning and acting in language models. arXiv preprint arXiv:2210.03629 (2022)." + }, + { + "type": "ref_text", + "bbox": [ + 0.113, + 0.701, + 0.887, + 0.725 + ], + "angle": 0, + "content": "23. Yu, B., Baker, F. N., Chen, Z., Ning, X. & Sun, H. Llasmol: Advancing large language models for chemistry with a large-scale, comprehensive, high-quality instruction tuning dataset. arXiv preprint arXiv:2402.09391 (2024)." + }, + { + "type": "ref_text", + "bbox": [ + 0.113, + 0.727, + 0.887, + 0.75 + ], + "angle": 0, + "content": "24. Mendez-Lucio, O., Nicolaou, C. A. & Earnshaw, B. MolE: a foundation model for molecular graphs using disentangled attention. Nature Communications 15, 9431 (2024)." + }, + { + "type": "ref_text", + "bbox": [ + 0.113, + 0.752, + 0.533, + 0.765 + ], + "angle": 0, + "content": "25. Team, G. Gemma 3 technical report. Google DeepMind (2025)." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.113, + 0.767, + 0.887, + 0.79 + ], + "angle": 0, + "content": "26. Hendrycks, D., Burns, C., Basart, S., Zou, A., Mazeika, M., Song, D. & Steinhardt, J. Measuring massive multitask language understanding. arXiv preprint arXiv:2009.03300 (2020)." + }, + { + "type": "ref_text", + "bbox": [ + 0.113, + 0.792, + 0.887, + 0.815 + ], + "angle": 0, + "content": "27. Rein, D., Hou, B. L., Stickland, A. C., Petty, J., Pang, R. Y., Dirani, J., Michael, J. & Bowman, S. R. Gpqa: A graduate-level google-proof q@a benchmark in First Conference on Language Modeling (2024)." + }, + { + "type": "ref_text", + "bbox": [ + 0.113, + 0.817, + 0.887, + 0.851 + ], + "angle": 0, + "content": "28. Skarlinski, M. D., Cox, S., Laurent, J. M., Braza, J. D., Hinks, M., Hammerling, M. J., Ponnapati, M., Rodriques, S. G. & White, A. D. Language agents achieve superhuman synthesis of scientific knowledge. arXiv preprint arXiv:2409.13740 (2024)." + }, + { + "type": "ref_text", + "bbox": [ + 0.113, + 0.853, + 0.887, + 0.876 + ], + "angle": 0, + "content": "29. Chen, J., Hu, Y., Wang, Y., Lu, Y., Cao, X., Lin, M., Xu, H., Wu, J., Xiao, C., Sun, J., et al. TrialBench: Multi-modal artificial intelligence-ready clinical trial datasets. arXiv preprint arXiv:2407.00631 (2024)." + }, + { + "type": "ref_text", + "bbox": [ + 0.113, + 0.878, + 0.887, + 0.902 + ], + "angle": 0, + "content": "30. Torng, W. & Altman, R. B. Graph convolutional neural networks for predicting drug-target interactions. Journal of chemical information and modeling 59, 4131-4149 (2019)." + }, + { + "type": "list", + "bbox": [ + 0.113, + 0.121, + 0.887, + 0.902 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.863, + 0.938, + 0.885, + 0.949 + ], + "angle": 0, + "content": "19" + } + ], + [ + { + "type": "ref_text", + "bbox": [ + 0.111, + 0.092, + 0.888, + 0.116 + ], + "angle": 0, + "content": "31. Stärk, H., Ganea, O., Pattanaik, L., Barzilay, R. 
& Jaakkola, T. Equibind: Geometric deep learning for drug binding structure prediction in International conference on machine learning (2022), 20503-20521." + }, + { + "type": "ref_text", + "bbox": [ + 0.111, + 0.117, + 0.888, + 0.152 + ], + "angle": 0, + "content": "32. Xiong, Z., Wang, D., Liu, X., Zhong, F., Wan, X., Li, X., Li, Z., Luo, X., Chen, K., Jiang, H., et al. Pushing the boundaries of molecular representation for drug discovery with the graph attention mechanism. Journal of medicinal chemistry 63, 8749-8760 (2019)." + }, + { + "type": "ref_text", + "bbox": [ + 0.111, + 0.153, + 0.887, + 0.178 + ], + "angle": 0, + "content": "33. Heid, E. & Green, W. H. Machine learning of reaction properties via learned representations of the condensed graph of reaction. Journal of Chemical Information and Modeling 62, 2101-2110 (2021)." + }, + { + "type": "ref_text", + "bbox": [ + 0.111, + 0.179, + 0.887, + 0.213 + ], + "angle": 0, + "content": "34. Yang, K., Swanson, K., Jin, W., Coley, C., Eiden, P., Gao, H., Guzman-Perez, A., Hopper, T., Kelley, B., Mathea, M., et al. Analyzing learned molecular representations for property prediction. Journal of chemical information and modeling 59, 3370-3388 (2019)." + }, + { + "type": "ref_text", + "bbox": [ + 0.111, + 0.215, + 0.887, + 0.25 + ], + "angle": 0, + "content": "35. Morrone, J. A., Weber, J. K., Huynh, T., Luo, H. & Cornell, W. D. Combining docking pose rank and structure with deep learning improves protein-ligand binding mode prediction over a baseline docking approach. Journal of chemical information and modeling 60, 4170-4179 (2020)." + }, + { + "type": "ref_text", + "bbox": [ + 0.111, + 0.251, + 0.887, + 0.275 + ], + "angle": 0, + "content": "36. Mohr, B., Shmilovich, K., Kleinwächter, I. S., Schneider, D., Ferguson, A. L. & Bereau, T. Data-driven discovery of cardiolipin-selective small molecules by computational active learning. Chemical Science 13, 4498-4511 (2022)." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.111, + 0.276, + 0.887, + 0.3 + ], + "angle": 0, + "content": "37. Stokes, J. M., Yang, K., Swanson, K., Jin, W., Cubillos-Ruiz, A., Donghia, N. M., MacNair, C. R., French, S., Carfrae, L. A., Bloom-Ackermann, Z., et al. A deep learning approach to antibiotic discovery. Cell 180, 688-702 (2020)." + }, + { + "type": "ref_text", + "bbox": [ + 0.111, + 0.301, + 0.887, + 0.325 + ], + "angle": 0, + "content": "38. Rogers, D. & Hahn, M. Extended-connectivity fingerprints. Journal of chemical information and modeling 50, 742-754 (2010)." + }, + { + "type": "ref_text", + "bbox": [ + 0.111, + 0.327, + 0.887, + 0.362 + ], + "angle": 0, + "content": "39. Tayyebi, A., Alshami, A. S., Rabiei, Z., Yu, X., Ismail, N., Talukder, M. J. & Power, J. Prediction of organic compound aqueous solubility using machine learning: a comparison study of descriptor-based and fingerprints-based models. Journal of Cheminformatics 15, 99 (2023)." + }, + { + "type": "ref_text", + "bbox": [ + 0.111, + 0.363, + 0.887, + 0.387 + ], + "angle": 0, + "content": "40. Belenahalli Shekarappa, S., Kandagalla, S. & Lee, J. Development of machine learning models based on molecular fingerprints for selection of small molecule inhibitors against JAK2 protein. Journal of Computational Chemistry 44, 1493-1504 (2023)." + }, + { + "type": "ref_text", + "bbox": [ + 0.111, + 0.388, + 0.887, + 0.412 + ], + "angle": 0, + "content": "41. Huang, K., Chandak, P., Wang, Q., Havaldar, S., Vaid, A., Leskovec, J., Nadkarni, G. N., Glicksberg, B. S., Gehlenborg, N. & Zitnik, M. A foundation model for clinician-centered drug repurposing. Nature Medicine, 1-13 (2024)." + }, + { + "type": "ref_text", + "bbox": [ + 0.111, + 0.414, + 0.887, + 0.44 + ], + "angle": 0, + "content": "42. Jumper, J., Evans, R., Pritzel, A., Green, T., Figurnov, M., Ronneberger, O., Tunyasuvunakool, K., Bates, R., Zidek, A., Potapenko, A., et al. Highly accurate protein structure prediction with AlphaFold. 
nature 596, 583-589 (2021)." + }, + { + "type": "ref_text", + "bbox": [ + 0.111, + 0.442, + 0.887, + 0.467 + ], + "angle": 0, + "content": "43. Tunyasuvunakool, K., Adler, J., Wu, Z., Green, T., Zielinski, M., Žídek, A., Bridgland, A., Cowie, A., Meyer, C., Laydon, A., et al. Highly accurate protein structure prediction for the human proteome. Nature 596, 590-596 (2021)." + }, + { + "type": "ref_text", + "bbox": [ + 0.111, + 0.469, + 0.887, + 0.495 + ], + "angle": 0, + "content": "44. Senior, A. W., Evans, R., Jumper, J., Kirkpatrick, J., Sifre, L., Green, T., Qin, C., Zidek, A., Nelson, A. W., Bridgland, A., et al. Improved protein structure prediction using potentials from deep learning. Nature 577, 706-710 (2020)." + }, + { + "type": "ref_text", + "bbox": [ + 0.111, + 0.496, + 0.887, + 0.519 + ], + "angle": 0, + "content": "45. Abramson, J., Adler, J., Dunger, J., Evans, R., Green, T., Pritzel, A., Ronneberger, O., Willmore, L., Ballard, A. J., Bambrick, J., et al. Accurate structure prediction of biomolecular interactions with AlphaFold 3. Nature, 1-3 (2024)." + }, + { + "type": "ref_text", + "bbox": [ + 0.111, + 0.52, + 0.887, + 0.555 + ], + "angle": 0, + "content": "46. Zambaldi, V., La, D., Chu, A. E., Patani, H., Danson, A. E., Kwan, T. O., Frerix, T., Schneider, R. G., Saxton, D., Thillaisundaram, A., et al. De novo design of high-affinity protein binders with AlphaProteo. arXiv preprint arXiv:2409.08022 (2024)." + }, + { + "type": "ref_text", + "bbox": [ + 0.111, + 0.557, + 0.887, + 0.592 + ], + "angle": 0, + "content": "47. Ren, F., Ding, X., Zheng, M., Korzinkin, M., Cai, X., Zhu, W., Mantsyzov, A., Aliper, A., Aladinskiy, V., Cao, Z., et al. AlphaFold accelerates artificial intelligence powered drug discovery: efficient discovery of a novel CDK20 small molecule inhibitor. Chemical science 14, 1443-1452 (2023)." + }, + { + "type": "ref_text", + "bbox": [ + 0.111, + 0.593, + 0.753, + 0.607 + ], + "angle": 0, + "content": "48. Vaswani, A. 
Attention is all you need. Advances in Neural Information Processing Systems (2017)." + }, + { + "type": "ref_text", + "bbox": [ + 0.111, + 0.608, + 0.887, + 0.631 + ], + "angle": 0, + "content": "49. Zhang, S., Dong, L., Li, X., Zhang, S., Sun, X., Wang, S., Li, J., Hu, R., Zhang, T., Wu, F., et al. Instruction tuning for large language models: A survey. arXiv preprint arXiv:2308.10792 (2023)." + }, + { + "type": "ref_text", + "bbox": [ + 0.111, + 0.632, + 0.887, + 0.656 + ], + "angle": 0, + "content": "50. Kaufmann, T., Weng, P., Bengs, V. & Hüllermeier, E. A survey of reinforcement learning from human feedback. arXiv preprint arXiv:2312.14925 (2023)." + }, + { + "type": "ref_text", + "bbox": [ + 0.111, + 0.658, + 0.82, + 0.671 + ], + "angle": 0, + "content": "51. Liu, Y. & Lapata, M. Text summarization with pretrained encoders. arXiv preprint arXiv:1908.08345 (2019)." + }, + { + "type": "ref_text", + "bbox": [ + 0.111, + 0.672, + 0.887, + 0.696 + ], + "angle": 0, + "content": "52. Kenton, J. D. M.-W. C. & Toutanova, L. K. BERT: Pre-training of deep bidirectional transformers for language understanding in Proceedings of naacL-HLT 1 (2019)." + }, + { + "type": "ref_text", + "bbox": [ + 0.111, + 0.697, + 0.887, + 0.732 + ], + "angle": 0, + "content": "53. Luo, X., Rechardt, A., Sun, G., Nejad, K. K., Yáñez, F., Yilmaz, B., Lee, K., Cohen, A. O., Borghesani, V., Pashkov, A., et al. Large language models surpass human experts in predicting neuroscience results. Nature human behaviour, 1-11 (2024)." + }, + { + "type": "ref_text", + "bbox": [ + 0.111, + 0.734, + 0.887, + 0.757 + ], + "angle": 0, + "content": "54. Seidl, P., Vall, A., Hochreiter, S. & Klambauer, G. Enhancing activity prediction models in drug discovery with the ability to understand human language in International Conference on Machine Learning (2023), 30458-30490." + }, + { + "type": "ref_text", + "bbox": [ + 0.111, + 0.758, + 0.887, + 0.793 + ], + "angle": 0, + "content": "55. 
Rives, A., Meier, J., Sercu, T., Goyal, S., Lin, Z., Liu, J., Guo, D., Ott, M., Zitnick, C. L., Ma, J., et al. Biological structure and function emerge from scaling unsupervised learning to 250 million protein sequences. Proceedings of the National Academy of Sciences 118, e2016239118 (2021)." + }, + { + "type": "ref_text", + "bbox": [ + 0.111, + 0.795, + 0.887, + 0.819 + ], + "angle": 0, + "content": "56. Lin, Z., Akin, H., Rao, R., Hie, B., Zhu, Z., Lu, W., Smetanin, N., Verkuil, R., Kabeli, O., Shmueli, Y., et al. Evolutionary-scale prediction of atomic-level protein structure with a language model. Science 379, 1123-1130 (2023)." + }, + { + "type": "ref_text", + "bbox": [ + 0.111, + 0.82, + 0.887, + 0.844 + ], + "angle": 0, + "content": "57. Alley, E. C., Khimulya, G., Biswas, S., AlQuraishi, M. & Church, G. M. Unified rational protein engineering with sequence-based deep representation learning. Nature methods 16, 1315-1322 (2019)." + }, + { + "type": "ref_text", + "bbox": [ + 0.111, + 0.845, + 0.887, + 0.869 + ], + "angle": 0, + "content": "58. Ferruz, N., Schmidt, S. & Höcker, B. ProtGPT2 is a deep unsupervised language model for protein design. Nature communications 13, 4348 (2022)." + }, + { + "type": "ref_text", + "bbox": [ + 0.111, + 0.87, + 0.887, + 0.895 + ], + "angle": 0, + "content": "59. Nguyen, E., Poli, M., Durrant, M. G., Kang, B., Katrekar, D., Li, D. B., Bartie, L. J., Thomas, A. W., King, S. H., Brixi, G., et al. Sequence modeling and design from molecular to genome scale with Evo. Science 386, eado9336 (2024)." + }, + { + "type": "list", + "bbox": [ + 0.111, + 0.092, + 0.888, + 0.895 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.864, + 0.938, + 0.885, + 0.949 + ], + "angle": 0, + "content": "20" + } + ], + [ + { + "type": "ref_text", + "bbox": [ + 0.112, + 0.092, + 0.887, + 0.127 + ], + "angle": 0, + "content": "60. 
Dalla-Torre, H., Gonzalez, L., Mendoza-Revilla, J., Lopez Carranza, N., Grzywaczewski, A. H., Oteri, F., Dallago, C., Trop, E., de Almeida, B. P., Sirelkhatim, H., et al. Nucleotide Transformer: building and evaluating robust foundation models for human genomics. Nature Methods, 1-11 (2024)." + }, + { + "type": "ref_text", + "bbox": [ + 0.112, + 0.128, + 0.886, + 0.152 + ], + "angle": 0, + "content": "61. Cornman, A., West-Roberts, J., Camargo, A. P., Roux, S., Beracochea, M., Mirdita, M., Ovchinnikov, S. & Hwang, Y. The OMG dataset: An Open MetaGenomic corpus for mixed-modality genomic language modeling. bioRxiv, 2024-08 (2024)." + }, + { + "type": "ref_text", + "bbox": [ + 0.112, + 0.153, + 0.885, + 0.178 + ], + "angle": 0, + "content": "62. Zhuo, L., Chi, Z., Xu, M., Huang, H., Zheng, H., He, C., Mao, X.-L. & Zhang, W. Protllm: An interleaved protein-language llm with protein-as-word pre-training. arXiv preprint arXiv:2403.07920 (2024)." + }, + { + "type": "ref_text", + "bbox": [ + 0.112, + 0.179, + 0.885, + 0.203 + ], + "angle": 0, + "content": "63. Pei, Q., Zhang, W., Zhu, J., Wu, K., Gao, K., Wu, L., Xia, Y. & Yan, R. Biot5: Enriching cross-modal integration in biology with chemical knowledge and natural language associations. arXiv preprint arXiv:2310.07276 (2023)." + }, + { + "type": "ref_text", + "bbox": [ + 0.112, + 0.204, + 0.885, + 0.227 + ], + "angle": 0, + "content": "64. Anonymous. Parameter Efficient Graph Encoding for Large Language Models 2025. https://openreview.net/forum?id=RbcXV63ZJk." + }, + { + "type": "ref_text", + "bbox": [ + 0.112, + 0.228, + 0.885, + 0.254 + ], + "angle": 0, + "content": "65. Cui, H., Wang, C., Maan, H., Pang, K., Luo, F., Duan, N. & Wang, B. scGPT: toward building a foundation model for single-cell multi-omics using generative AI. Nature Methods, 1-11 (2024)." + }, + { + "type": "ref_text", + "bbox": [ + 0.112, + 0.255, + 0.885, + 0.279 + ], + "angle": 0, + "content": "66. Chen, Y. & Zou, J. 
GenePT: a simple but effective foundation model for genes and cells built from ChatGPT. bioRxiv, 2023-10 (2024)." + }, + { + "type": "ref_text", + "bbox": [ + 0.112, + 0.28, + 0.885, + 0.304 + ], + "angle": 0, + "content": "67. Theodoris, C. V., Xiao, L., Chopra, A., Chaffin, M. D., Al Sayed, Z. R., Hill, M. C., Mantineo, H., Brydon, E. M., Zeng, Z., Liu, X. S., et al. Transfer learning enables predictions in network biology. Nature 618, 616-624 (2023)." + }, + { + "type": "ref_text", + "bbox": [ + 0.112, + 0.305, + 0.885, + 0.329 + ], + "angle": 0, + "content": "68. Schaar, A. C., Tejada-Lapuerta, A., Palla, G., Gutgesell, R., Halle, L., Minaeva, M., Vornholz, L., Dony, L., Drummer, F., Bahrami, M., et al. Nicheformer: a foundation model for single-cell and spatial omics. bioRxiv, 2024-04 (2024)." + }, + { + "type": "ref_text", + "bbox": [ + 0.112, + 0.33, + 0.885, + 0.354 + ], + "angle": 0, + "content": "69. Levine, D., Rizvi, S. A., Lévy, S., Pallikkavaliyaveetil, N., Zhang, D., Chen, X., Ghadermarzi, S., Wu, R., Zheng, Z., Vrkic, I., et al. Cell2Sentence: teaching large language models the language of biology. BioRxiv, 2023-09 (2023)." + }, + { + "type": "ref_text", + "bbox": [ + 0.112, + 0.355, + 0.885, + 0.38 + ], + "angle": 0, + "content": "70. Xia, Y., Jin, P., Xie, S., He, L., Cao, C., Luo, R., Liu, G., Wang, Y., Liu, Z., Chen, Y.-J., et al. NatureLM: Deciphering the Language of Nature for Scientific Discovery. arXiv preprint arXiv:2502.07527 (2025)." + }, + { + "type": "ref_text", + "bbox": [ + 0.112, + 0.381, + 0.885, + 0.405 + ], + "angle": 0, + "content": "71. Wang, L., Ma, C., Feng, X., Zhang, Z., Yang, H., Zhang, J., Chen, Z., Tang, J., Chen, X., Lin, Y., et al. A survey on large language model based autonomous agents. Frontiers of Computer Science 18, 186345 (2024)." + }, + { + "type": "ref_text", + "bbox": [ + 0.112, + 0.406, + 0.836, + 0.42 + ], + "angle": 0, + "content": "72. Shanahan, M., McDonell, K. & Reynolds, L. 
Role play with large language models. Nature 623, 493-498 (2023)." + }, + { + "type": "ref_text", + "bbox": [ + 0.112, + 0.421, + 0.885, + 0.445 + ], + "angle": 0, + "content": "73. Qian, C., Cong, X., Yang, C., Chen, W., Su, Y., Xu, J., Liu, Z. & Sun, M. Communicative agents for software development. arXiv preprint arXiv:2307.07924 6 (2023)." + }, + { + "type": "ref_text", + "bbox": [ + 0.112, + 0.446, + 0.885, + 0.47 + ], + "angle": 0, + "content": "74. Hong, S., Zheng, X., Chen, J., Cheng, Y., Wang, J., Zhang, C., Wang, Z., Yau, S. K. S., Lin, Z., Zhou, L., et al. Metagpt: Meta programming for multi-agent collaborative framework. arXiv preprint arXiv:2308.00352 (2023)." + }, + { + "type": "ref_text", + "bbox": [ + 0.112, + 0.471, + 0.885, + 0.495 + ], + "angle": 0, + "content": "75. Talebirad, Y. & Nadiri, A. Multi-agent collaboration: Harnessing the power of intelligent llm agents. arXiv preprint arXiv:2306.03314 (2023)." + }, + { + "type": "ref_text", + "bbox": [ + 0.112, + 0.497, + 0.885, + 0.521 + ], + "angle": 0, + "content": "76. Hao, S., Gu, Y., Ma, H., Hong, J. J., Wang, Z., Wang, D. Z. & Hu, Z. Reasoning with language model is planning with world model. arXiv preprint arXiv:2305.14992 (2023)." + }, + { + "type": "ref_text", + "bbox": [ + 0.112, + 0.522, + 0.885, + 0.546 + ], + "angle": 0, + "content": "77. Huang, W., Abbeel, P., Pathak, D. & Mordatch, I. Language models as zero-shot planners: Extracting actionable knowledge for embodied agents in International conference on machine learning (2022), 9118-9147." + }, + { + "type": "ref_text", + "bbox": [ + 0.112, + 0.547, + 0.885, + 0.581 + ], + "angle": 0, + "content": "78. Song, C. H., Wu, J., Washington, C., Sadler, B. M., Chao, W.-L. & Su, Y. Lm-planner: Few-shot grounded planning for embodied agents with large language models in Proceedings of the IEEE/CVF International Conference on Computer Vision (2023), 2998-3009." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.112, + 0.583, + 0.885, + 0.607 + ], + "angle": 0, + "content": "79. Wang, Z., Cai, S., Chen, G., Liu, A., Ma, X. & Liang, Y. Describe, explain, plan and select: Interactive planning with large language models enables open-world multi-task agents. arXiv preprint arXiv:2302.01560 (2023)." + }, + { + "type": "ref_text", + "bbox": [ + 0.112, + 0.608, + 0.885, + 0.633 + ], + "angle": 0, + "content": "80. Yao, S., Yu, D., Zhao, J., Shafran, I., Griffiths, T., Cao, Y. & Narasimhan, K. Tree of thoughts: Deliberate problem solving with large language models. Advances in Neural Information Processing Systems 36 (2024)." + }, + { + "type": "ref_text", + "bbox": [ + 0.112, + 0.634, + 0.851, + 0.647 + ], + "angle": 0, + "content": "81. Parisi, A., Zhao, Y. & Fiedel, N. Talm: Tool augmented language models. arXiv preprint arXiv:2205.12255 (2022)." + }, + { + "type": "ref_text", + "bbox": [ + 0.112, + 0.648, + 0.887, + 0.683 + ], + "angle": 0, + "content": "82. Schick, T., Dwivedi-Yu, J., Dessi', R., Raileanu, R., Lomeli, M., Hambro, E., Zettlemoyer, L., Cancedda, N. & Scialom, T. Toolformer: Language models can teach themselves to use tools. Advances in Neural Information Processing Systems 36, 68539-68551 (2023)." + }, + { + "type": "ref_text", + "bbox": [ + 0.112, + 0.685, + 0.885, + 0.708 + ], + "angle": 0, + "content": "83. Qin, Y., Hu, S., Lin, Y., Chen, W., Ding, N., Cui, G., Zeng, Z., Zhou, X., Huang, Y., Xiao, C., et al. Tool learning with foundation models. ACM Computing Surveys 57, 1-40 (2024)." + }, + { + "type": "ref_text", + "bbox": [ + 0.112, + 0.709, + 0.885, + 0.733 + ], + "angle": 0, + "content": "84. Cai, T., Wang, X., Ma, T., Chen, X. & Zhou, D. Large language models as tool makers. arXiv preprint arXiv:2305.17126 (2023)." + }, + { + "type": "ref_text", + "bbox": [ + 0.112, + 0.735, + 0.885, + 0.759 + ], + "angle": 0, + "content": "85. Shinn, N., Cassano, F., Gopinath, A., Narasimhan, K. & Yao, S. 
Reflexion: Language agents with verbal reinforcement learning. Advances in Neural Information Processing Systems 36 (2024)." + }, + { + "type": "ref_text", + "bbox": [ + 0.112, + 0.76, + 0.885, + 0.784 + ], + "angle": 0, + "content": "86. Yang, J., Jimenez, C. E., Wettig, A., Lieret, K., Yao, S., Narasimhan, K. & Press, O. Swe-agent: Agent-computer interfaces enable automated software engineering. arXiv preprint arXiv:2405.15793 (2024)." + }, + { + "type": "ref_text", + "bbox": [ + 0.112, + 0.785, + 0.885, + 0.81 + ], + "angle": 0, + "content": "87. Qian, C., Dang, Y., Li, J., Liu, W., Chen, W., Yang, C., Liu, Z. & Sun, M. Experiential co-learning of software-developing agents. arXiv preprint arXiv:2312.17025 (2023)." + }, + { + "type": "ref_text", + "bbox": [ + 0.112, + 0.811, + 0.885, + 0.835 + ], + "angle": 0, + "content": "88. Gottweis, J., Weng, W.-H., Daryin, A., Tu, T., Palepu, A., Sirkovic, P., Myaskovsky, A., Weissenberger, F., Rong, K., Tanno, R., et al. Towards an AI co-scientist. arXiv preprint arXiv:2502.18864 (2025)." + }, + { + "type": "ref_text", + "bbox": [ + 0.112, + 0.836, + 0.885, + 0.86 + ], + "angle": 0, + "content": "89. Schmidgall, S., Su, Y., Wang, Z., Sun, X., Wu, J., Yu, X., Liu, J., Liu, Z. & Barsoum, E. Agent Laboratory: Using LLM Agents as Research Assistants. arXiv preprint arXiv:2501.04227 (2025)." + }, + { + "type": "ref_text", + "bbox": [ + 0.112, + 0.861, + 0.885, + 0.885 + ], + "angle": 0, + "content": "90. Swanson, K., Wu, W., Bulaong, N. L., Pak, J. E. & Zou, J. The virtual lab: Ai agents design new sars-cov-2 nanobodies with experimental validation. bioRxiv, 2024-11 (2024)." + }, + { + "type": "ref_text", + "bbox": [ + 0.112, + 0.887, + 0.885, + 0.911 + ], + "angle": 0, + "content": "91. Lu, C., Lu, C., Lange, R. T., Foerster, J., Clune, J. & Ha, D. The ai scientist: Towards fully automated open-ended scientific discovery. arXiv preprint arXiv:2408.06292 (2024)." 
+ }, + { + "type": "list", + "bbox": [ + 0.112, + 0.092, + 0.887, + 0.911 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.863, + 0.938, + 0.883, + 0.949 + ], + "angle": 0, + "content": "21" + } + ], + [ + { + "type": "ref_text", + "bbox": [ + 0.111, + 0.092, + 0.887, + 0.116 + ], + "angle": 0, + "content": "92. M. Bran, A., Cox, S., Schilter, O., Baldassari, C., White, A. D. & Schwaller, P. Augmenting large language models with chemistry tools. Nature Machine Intelligence, 1-11 (2024)." + }, + { + "type": "ref_text", + "bbox": [ + 0.111, + 0.117, + 0.887, + 0.142 + ], + "angle": 0, + "content": "93. Boiko, D. A., MacKnight, R., Kline, B. & Gomes, G. Autonomous chemical research with large language models. Nature 624, 570-578 (2023)." + }, + { + "type": "ref_text", + "bbox": [ + 0.111, + 0.143, + 0.887, + 0.167 + ], + "angle": 0, + "content": "94. Gao, S., Zhu, R., Kong, Z., Noori, A., Su, X., Ginder, C., Tsiligkaridis, T. & Zitnik, M. TxAgent: An AI Agent for Therapeutic Reasoning Across a Universe of Tools. arXiv preprint arXiv:2503.10970 (2025)." + }, + { + "type": "ref_text", + "bbox": [ + 0.111, + 0.168, + 0.887, + 0.192 + ], + "angle": 0, + "content": "95. Aleixo, E. L., Colonna, J. G., Cristo, M. & Fernandes, E. Catastrophic forgetting in deep learning: A comprehensive taxonomy. arXiv preprint arXiv:2312.10549 (2023)." 
+ }, + { + "type": "list", + "bbox": [ + 0.111, + 0.092, + 0.887, + 0.192 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.864, + 0.938, + 0.884, + 0.949 + ], + "angle": 0, + "content": "22" + } + ], + [ + { + "type": "title", + "bbox": [ + 0.111, + 0.086, + 0.441, + 0.11 + ], + "angle": 0, + "content": "Supplementary Material" + }, + { + "type": "title", + "bbox": [ + 0.111, + 0.127, + 0.264, + 0.143 + ], + "angle": 0, + "content": "Version control" + }, + { + "type": "title", + "bbox": [ + 0.11, + 0.159, + 0.334, + 0.175 + ], + "angle": 0, + "content": "V0 (25 March 2025) \\(\\rightarrow\\) V1" + }, + { + "type": "text", + "bbox": [ + 0.135, + 0.188, + 0.884, + 0.217 + ], + "angle": 0, + "content": "- Upgraded the Agentic-Tx system's orchestrator from Gemini 2.0 to Gemini 2.5. This enhancement results in significant performance improvements in complex workflow orchestration, as detailed in Table 3." + }, + { + "type": "text", + "bbox": [ + 0.135, + 0.218, + 0.884, + 0.249 + ], + "angle": 0, + "content": "- Added performance results of TxGemma-Predict and TxGemma-Chat (trained only on commercially licensed datasets) for binary classification (Table S.17), regression, and generation tasks (Table S.18)." + }, + { + "type": "list", + "bbox": [ + 0.135, + 0.188, + 0.884, + 0.249 + ], + "angle": 0, + "content": null + }, + { + "type": "title", + "bbox": [ + 0.111, + 0.266, + 0.236, + 0.284 + ], + "angle": 0, + "content": "A Summary" + }, + { + "type": "text", + "bbox": [ + 0.136, + 0.296, + 0.405, + 0.309 + ], + "angle": 0, + "content": "Data details as listed in Section B:" + }, + { + "type": "text", + "bbox": [ + 0.168, + 0.315, + 0.608, + 0.329 + ], + "angle": 0, + "content": "- Table S.1: Excluded TDC tasks and reasons for exclusion." 
+ }, + { + "type": "text", + "bbox": [ + 0.168, + 0.33, + 0.884, + 0.359 + ], + "angle": 0, + "content": "- Table S.2: Number of samples in training, validation, and test sets for all binary classification tasks." + }, + { + "type": "text", + "bbox": [ + 0.168, + 0.361, + 0.885, + 0.39 + ], + "angle": 0, + "content": "- Table S.3: Number of samples in training, validation, and test sets for all regression and generation tasks." + }, + { + "type": "text", + "bbox": [ + 0.169, + 0.391, + 0.597, + 0.406 + ], + "angle": 0, + "content": "- Table S.4: Descriptions of the binary classification tasks." + }, + { + "type": "text", + "bbox": [ + 0.169, + 0.407, + 0.637, + 0.422 + ], + "angle": 0, + "content": "- Table S.5: Descriptions of the regression and generation tasks." + }, + { + "type": "text", + "bbox": [ + 0.168, + 0.422, + 0.821, + 0.437 + ], + "angle": 0, + "content": "- Table S.6 Types of features in the processed TDC data along with illustrative examples." + }, + { + "type": "text", + "bbox": [ + 0.168, + 0.438, + 0.841, + 0.453 + ], + "angle": 0, + "content": "Figure S.1: Distribution of TDC task sizes, aggregated over train, validation, and test sets." + }, + { + "type": "list", + "bbox": [ + 0.168, + 0.315, + 0.885, + 0.453 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.136, + 0.457, + 0.528, + 0.472 + ], + "angle": 0, + "content": "Method and modeling details as listed in Section C:" + }, + { + "type": "text", + "bbox": [ + 0.169, + 0.477, + 0.633, + 0.492 + ], + "angle": 0, + "content": "- Table S.7 Examples of prompts for binary classification tasks." + }, + { + "type": "text", + "bbox": [ + 0.169, + 0.492, + 0.673, + 0.507 + ], + "angle": 0, + "content": "- Table S.8 Examples of prompts for regression and generation tasks." + }, + { + "type": "text", + "bbox": [ + 0.169, + 0.508, + 0.697, + 0.522 + ], + "angle": 0, + "content": "- Table S.9 Example of a 10-shot prompt for a binary classification task." 
+ }, + { + "type": "text", + "bbox": [ + 0.169, + 0.522, + 0.746, + 0.537 + ], + "angle": 0, + "content": "- Table S.10 Example of prompts for predicting adverse events in clinical trials." + }, + { + "type": "text", + "bbox": [ + 0.169, + 0.538, + 0.748, + 0.553 + ], + "angle": 0, + "content": "- Table S.11 Example of Agentic-Tx response to a chemical preference question." + }, + { + "type": "text", + "bbox": [ + 0.169, + 0.553, + 0.533, + 0.568 + ], + "angle": 0, + "content": "- Table S.12 List of tools available to Agentic-Tx." + }, + { + "type": "text", + "bbox": [ + 0.169, + 0.568, + 0.884, + 0.597 + ], + "angle": 0, + "content": "- Figure S.2 Distribution of Tanimoto similarities for 10 nearest neighbors by dataset splits in the AMES task." + }, + { + "type": "text", + "bbox": [ + 0.169, + 0.599, + 0.815, + 0.614 + ], + "angle": 0, + "content": "- Section C.1 Details about Wilcoxon signed-rank test used to assess model performance." + }, + { + "type": "list", + "bbox": [ + 0.169, + 0.477, + 0.884, + 0.614 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.136, + 0.619, + 0.446, + 0.633 + ], + "angle": 0, + "content": "Additional results as listed in Section D:" + }, + { + "type": "text", + "bbox": [ + 0.169, + 0.638, + 0.6, + 0.653 + ], + "angle": 0, + "content": "- Additional prediction results for TxGemma (Section D.1)" + }, + { + "type": "text", + "bbox": [ + 0.2, + 0.655, + 0.884, + 0.685 + ], + "angle": 0, + "content": "* Table S.13 Performance on binary classification tasks for specialist SOTA, base Gemma-2, and TxGemma-Predict models." + }, + { + "type": "text", + "bbox": [ + 0.201, + 0.686, + 0.885, + 0.715 + ], + "angle": 0, + "content": "* Table S.14 Performance on regression and generation tasks for specialist SOTA, base Gemma-2, and TxGemma-Predict models." 
+ }, + { + "type": "text", + "bbox": [ + 0.201, + 0.716, + 0.885, + 0.746 + ], + "angle": 0, + "content": "* Table S.15 Performance on binary classification tasks for TxGemma-Predict, TxGemma-Chat, and Tx-LLM models." + }, + { + "type": "text", + "bbox": [ + 0.201, + 0.747, + 0.885, + 0.775 + ], + "angle": 0, + "content": "* Table S.16 Performance on regression and generation tasks for TxGemma-Predict, TxGemma-Chat, and Tx-LLM models." + }, + { + "type": "text", + "bbox": [ + 0.201, + 0.777, + 0.885, + 0.806 + ], + "angle": 0, + "content": "* Table S.17 Performance on binary classification tasks for TxGemma-Predict and TxGemma-Chat models trained only on datasets with commercial licenses." + }, + { + "type": "text", + "bbox": [ + 0.201, + 0.808, + 0.885, + 0.837 + ], + "angle": 0, + "content": "* Table S.18 Performance on regression and generation tasks for TxGemma-Predict and TxGemma-Chat models trained only on datasets with commercial licenses." + }, + { + "type": "text", + "bbox": [ + 0.201, + 0.838, + 0.884, + 0.868 + ], + "angle": 0, + "content": "* Figure S.4 Performance of TxGemma-27B-Predict compared to generalist and specialist SOTA models." + }, + { + "type": "text", + "bbox": [ + 0.2, + 0.868, + 0.885, + 0.883 + ], + "angle": 0, + "content": "* Figure S.5 Comparison of TxGemma-27B-Predict with LlaSMol on select small molecule tasks." + }, + { + "type": "text", + "bbox": [ + 0.201, + 0.884, + 0.878, + 0.899 + ], + "angle": 0, + "content": "* Figure S.6 Comparison of TxGemma-27B-Predict with MolE on select small molecule tasks." + }, + { + "type": "text", + "bbox": [ + 0.201, + 0.899, + 0.689, + 0.914 + ], + "angle": 0, + "content": "* Figure S.11 Inference speed of TxGemma models at various sizes." 
+ }, + { + "type": "list", + "bbox": [ + 0.2, + 0.655, + 0.885, + 0.914 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.864, + 0.938, + 0.885, + 0.95 + ], + "angle": 0, + "content": "23" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.201, + 0.091, + 0.775, + 0.105 + ], + "angle": 0, + "content": "* Figure S.12 Percent contamination for datasets and cosine similarity analysis." + }, + { + "type": "text", + "bbox": [ + 0.202, + 0.106, + 0.884, + 0.135 + ], + "angle": 0, + "content": "* Figure S.13 Performance on contaminated datasets before and after filtering out contaminated datapoints." + }, + { + "type": "text", + "bbox": [ + 0.202, + 0.136, + 0.733, + 0.15 + ], + "angle": 0, + "content": "* Figure S.16 Performance by feature type of all TxGemma-Predict sizes." + }, + { + "type": "text", + "bbox": [ + 0.202, + 0.151, + 0.885, + 0.181 + ], + "angle": 0, + "content": "* Figure S.17 Comparison of TxGemma-Predict performances over different sizes and with Gemma-2 models." + }, + { + "type": "text", + "bbox": [ + 0.202, + 0.182, + 0.885, + 0.212 + ], + "angle": 0, + "content": "* Figure S.18 Correlations of TxGemma-27B-Predict predictions for toxicity and clinical trial approval tasks." + }, + { + "type": "list", + "bbox": [ + 0.201, + 0.091, + 0.885, + 0.212 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.215, + 0.764, + 0.229 + ], + "angle": 0, + "content": "- Conversing with TxGemma-27B-Predict and TxGemma-27B-Chat (Section D.2)" + }, + { + "type": "text", + "bbox": [ + 0.201, + 0.232, + 0.884, + 0.261 + ], + "angle": 0, + "content": "* Figure S.7 Comparison of TxGemma-27B-Predict, TxGemma-27B-Chat, and Gemma-2-27B on MMLU." + }, + { + "type": "text", + "bbox": [ + 0.201, + 0.263, + 0.821, + 0.277 + ], + "angle": 0, + "content": "* Figure S.8 Example of a dialogue with TxGemma-27B-Predict about general topics." 
+ }, + { + "type": "text", + "bbox": [ + 0.201, + 0.278, + 0.885, + 0.293 + ], + "angle": 0, + "content": "* Figure S.9 Example of a multi-turn dialogue with TxGemma-27B-Predict about its predictions." + }, + { + "type": "text", + "bbox": [ + 0.202, + 0.294, + 0.885, + 0.323 + ], + "angle": 0, + "content": "* Figure S.10 Example of a prompt format the enables TxGemma-Chat to provide reasoning for challenging tasks." + }, + { + "type": "list", + "bbox": [ + 0.201, + 0.232, + 0.885, + 0.323 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.326, + 0.508, + 0.341 + ], + "angle": 0, + "content": "- Additional Agentic-Tx Results (Section D.3)" + }, + { + "type": "text", + "bbox": [ + 0.201, + 0.344, + 0.865, + 0.358 + ], + "angle": 0, + "content": "* Figure S.14 Agentic-Tx tool use frequencies for chemical preference and HLE benchmarks." + }, + { + "type": "text", + "bbox": [ + 0.201, + 0.359, + 0.861, + 0.374 + ], + "angle": 0, + "content": "* Figure S.15 Agentic-Tx tool use frequency per question for chemical preference questions." + }, + { + "type": "list", + "bbox": [ + 0.201, + 0.344, + 0.865, + 0.374 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.377, + 0.596, + 0.392 + ], + "angle": 0, + "content": "- Proof-of-concept example using TxGemma (Section D.4)" + }, + { + "type": "text", + "bbox": [ + 0.201, + 0.395, + 0.884, + 0.425 + ], + "angle": 0, + "content": "* Figure S.3 Illustration of a possible application of TxGemma to end-to-end therapeutic development." 
+ }, + { + "type": "page_number", + "bbox": [ + 0.864, + 0.938, + 0.885, + 0.949 + ], + "angle": 0, + "content": "24" + } + ], + [ + { + "type": "title", + "bbox": [ + 0.111, + 0.091, + 0.262, + 0.106 + ], + "angle": 0, + "content": "B Data details" + }, + { + "type": "text", + "bbox": [ + 0.11, + 0.12, + 0.884, + 0.152 + ], + "angle": 0, + "content": "This section provides a breakdown of the tasks used in our study, including information on excluded tasks and the size of training, validation, and test sets for binary classification, regression, and generation tasks." + }, + { + "type": "text", + "bbox": [ + 0.11, + 0.156, + 0.886, + 0.324 + ], + "angle": 0, + "content": "As previously mentioned, we excluded a small number of tasks from TDC for various reasons. Table S.1 provides an overview of the excluded tasks and the rationale behind their exclusion. The primary reasons for exclusion were the tasks' relevance to the study, limitations of LLMs, and specific data characteristics, such as the absence of clear metrics or redundancy. For instance, tasks like QM7b, QM8, and QM9, which focus on predicting quantum properties, were not directly relevant to the study's focus on therapeutic development. Similarly, IEDB Jespersen and PDB Jespersen were excluded due to their small size and the complexity of implementing token prediction, as opposed to binary classification, within an LLM framework. Tasks such as DrugBank DDI, TWOSIDES, and USPTO Catalyst posed challenges due to the large number of potential labels, making them difficult for LLMs to process effectively. MOSES, ZINC, and ChEMBL were excluded because they lacked well-defined evaluation metrics. Finally, USPTO 50K and USPTO Reaction were excluded as they either overlapped with or were subsets of the USPTO task." 
+ }, + { + "type": "text", + "bbox": [ + 0.11, + 0.329, + 0.884, + 0.436 + ], + "angle": 0, + "content": "Tables S.2 and S.3 specify the number of samples in the training, validation, and test sets for the included binary classification, regression, and generation tasks, respectively. Substantial variability in task sizes across different tasks is shown in these tables. The binary classification tasks range from 196 to 1,406,988 samples, while the regression and generation tasks range from 345 to 775,767 samples. This variability highlights the diverse data availability landscape across various tasks. Figure S.1 provides a visual representation of the distribution of TDC task sizes, aggregated across train, validation, and test sets. For tasks encompassing multiple subtasks, like ToxCast, the task size is computed by summing the sizes of each individual dataset." + }, + { + "type": "image", + "bbox": [ + 0.332, + 0.453, + 0.669, + 0.62 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.11, + 0.635, + 0.886, + 0.681 + ], + "angle": 0, + "content": "Figure S.1 | Distribution of TDC task sizes, aggregated over train, validation, and test sets. For tasks containing multiple datasets, such as ToxCast which contains data for more than 600 different assays, the task size is calculated by summing over the sizes for each dataset." + }, + { + "type": "text", + "bbox": [ + 0.11, + 0.704, + 0.886, + 0.873 + ], + "angle": 0, + "content": "Tables S.4 and S.5 provide a brief description of the tasks, as well as the types of inputs (e.g. protein, small molecules, etc.). These tasks are diverse and encompass many different aspects of development. Some tasks corresponding to gene-disease association or protein-protein interaction prediction are useful for early-stage development, in order to identify mechanisms of disease and relevant targets. 
Predictions of antibody affinity, drug-target interaction, high-throughput screening, drug synergy are useful for intermediate development steps that involve proposing candidate therapeutics based on their interaction with a target. Predictions of toxicity, pharmacokinetics, and developability are useful for filtering candidates down based on favorable druglike properties. Predictions of clinical trial outcome, reaction yields, retrosynthesis are useful for late-stage development where understanding the likelihood of clinical trial approval and manufacturing potential are critical. There are also tasks that are highly specific for particular therapeutics types, which include predictions of CRISPR repair, peptide-MHC binding, miRNA-Target interaction, and TCR-epitope binding." + }, + { + "type": "text", + "bbox": [ + 0.11, + 0.877, + 0.884, + 0.909 + ], + "angle": 0, + "content": "Binary classification tasks always output “(A)” or “(B)”, where “(A)” is a negative answer to the question which is specified in the prompt and “(B)” is a positive answer. Regression tasks output an integer between" + }, + { + "type": "page_number", + "bbox": [ + 0.864, + 0.938, + 0.884, + 0.949 + ], + "angle": 0, + "content": "25" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.11, + 0.091, + 0.888, + 0.138 + ], + "angle": 0, + "content": "0 and 1000, which can be transformed back into the original task-specific label space. The output of the USPTO generation task is the SMILES string of the predicted molecules. Table S.6 lists the different types of inputs in the processed TDC data along with illustrative examples." + }, + { + "type": "table_caption", + "bbox": [ + 0.11, + 0.157, + 0.886, + 0.203 + ], + "angle": 0, + "content": "Table S.1 | Excluded TDC tasks and reasons for exclusion. 
The tasks were excluded primarily due to their relevance to the study, limitations inherent to large language models (LLMs), and specific data characteristics, such as a lack of clear evaluation metrics or redundancy." + }, + { + "type": "table", + "bbox": [ + 0.118, + 0.208, + 0.88, + 0.491 + ], + "angle": 0, + "content": "
Task NameReason for Exclusion
QM7bPrediction of quantum properties is not closely related to therapeutic development.
QM8Prediction of quantum properties is not closely related to therapeutic development.
QM9Prediction of quantum properties is not closely related to therapeutic development.
IEDB JespersenAmount of data is small, and token prediction is more difficult to implement in a LLM than binary classification.
PDB JespersenAmount of data is small, and token prediction is more difficult to implement in a LLM than binary classification.
DrugBank DDILarge number of possible labels is difficult to implement in a LLM.
TWOSIDESLarge number of possible labels is difficult to implement in a LLM.
USPTO CatalystLarge number of possible labels is difficult to implement in a LLM.
MOSESNo clear metric.
ZINCNo clear metric.
ChEMBLNo clear metric.
USPTO 50KSubset of USPTO.
USPTO ReactionSame data as USPTO.
" + }, + { + "type": "page_number", + "bbox": [ + 0.864, + 0.938, + 0.885, + 0.95 + ], + "angle": 0, + "content": "26" + } + ], + [ + { + "type": "table_caption", + "bbox": [ + 0.11, + 0.174, + 0.887, + 0.234 + ], + "angle": 0, + "content": "Table S.2 | Number of samples in training, validation, and test sets for all binary classification tasks. The binary classification tasks range in size from a minimum of 196 samples (Carcinogens Lagunin) to a maximum of 1,406,988 samples (butkiewicz), highlighting the considerable variability in data availability across different tasks. The task type and split type are also indicated following the TDC classification and recommendation." + }, + { + "type": "table", + "bbox": [ + 0.076, + 0.24, + 0.925, + 0.822 + ], + "angle": 0, + "content": "
Task NameTask TypeSplit TypeTraining SizeValidation SizeTest Size
AMESToxicityScaffold5,0937281,457
BBB MartinsPharmacokineticsScaffold1,421203406
Bioavailability MaPharmacokineticsScaffold1,344192384
CYP1A2 VeithPharmacokineticsScaffold8,8051,2572,517
CYP2C19 VeithPharmacokineticsScaffold8,8651,2662,534
CYP2C9 Substrate CarbonMangelsPharmacokineticsScaffold46767135
CYP2C9 VeithPharmacokineticsScaffold8,4631,2102,419
CYP2D6 Substrate CarbonMangelsPharmacokineticsScaffold46567135
CYP2D6 VeithPharmacokineticsScaffold9,1911,3132,626
CYP3A4 Substrate CarbonMangelsPharmacokineticsScaffold46867135
CYP3A4 VeithPharmacokineticsScaffold8,6281,2332,467
Carcinogens LaguninToxicityScaffold1962856
ClinToxToxicityScaffold1,034147297
DILIToxicityScaffold3255496
HIA HouPharmacokineticsScaffold40358117
HIV*High-throughput screeningScaffold28,7884,1128,227
HuRIProtein-protein interactionCold-start45,8559873,694
MHC1 IEDB IMGT NielsenPeptide-MHC bindingRandom130,19018,59837,197
MHC2 IEDB JensenPeptide-MHC bindingRandom93,99713,42826,856
PAMPA NCATSPharmacokineticsScaffold1,423203408
Pgp BrocatelliPharmacokineticsScaffold851122245
SARSCOV2 3CLPro DiamondHigh-throughput screeningScaffold61688176
SARSCoV2 Vitro TouretHigh-throughput screeningScaffold1,038148298
SAbDab ChenDevelopabilityRandom1,686241482
Skin ReactionToxicityScaffold2824082
Tox21ToxicityScaffold54,5567,79015,600
ToxCastToxicityScaffold1,073,279153,099307,282
butkiewiczHigh-throughput screeningRandom1,406,988200,99840,1997
hERGToxicityScaffold45766132
hERG KarimToxicityScaffold9,4111,3442,690
herg centralToxicityScaffold214,82530,68961,379
miRTarBasemiRNA-target interactionRandom559,59179,948159,889
phase1Clinical trial outcomeCold-start1,546258598
phase2Clinical trial outcomeCold-start5,7927161,282
phase3Clinical trial outcomeCold-start41,255321,084
weberTCR-epitope bindingCold-start33,0134,7489,421
" + }, + { + "type": "table_footnote", + "bbox": [ + 0.111, + 0.823, + 0.456, + 0.835 + ], + "angle": 0, + "content": "* To predict whether compounds have Anti-HIV properties." + }, + { + "type": "page_number", + "bbox": [ + 0.864, + 0.938, + 0.884, + 0.949 + ], + "angle": 0, + "content": "27" + } + ], + [ + { + "type": "table_caption", + "bbox": [ + 0.11, + 0.203, + 0.886, + 0.261 + ], + "angle": 0, + "content": "Table S.3 | Number of samples in training, validation, and test sets for all regression and generation tasks. The regression and generation tasks vary significantly in size, ranging from a minimum of 345 samples (Protein SAbDab) to a maximum of 775,767 samples (USPTO). The task type and split type are also indicated following the TDC classification and recommendation." + }, + { + "type": "table", + "bbox": [ + 0.109, + 0.269, + 0.891, + 0.806 + ], + "angle": 0, + "content": "
Task NameTask TypeSplit TypeTraining SizeValidation SizeTest Size
BindingDB PatentDrug-target interactionTemporal146,80036,63049,028
BindingDB ic50Drug-target interactionCold-start375,1277,53131,495
BindingDB kdDrug-target interactionCold-start19,0343762,321
BindingDB kiDrug-target interactionCold-start57,6561,1894,709
Buchwald HartwigReaction yieldsRandom2,768396791
Caco2 WangPharmacokineticsScaffold63791182
Clearance Hepatocyte AZPharmacokineticsScaffold848122243
Clearance Microsome AZPharmacokineticsScaffold770111221
DAVISDrug-target interactionCold-start12,4552661,064
DisGeNETGene-disease associationRandom39,4255,62111,200
DrugComb BlissDrug synergyCombination207,77229,61859,708
DrugComb CSSDrug synergyCombination207,77229,61859,708
DrugComb HSADrug synergyCombination207,77229,61859,708
DrugComb LoeweDrug synergyCombination207,77229,61859,708
DrugComb ZIPDrug synergyCombination207,77229,61859,708
GDSC1Drug responseRandom124,11717,73135,462
GDSC2Drug responseRandom64,8929,27018,541
Half Life ObachPharmacokineticsScaffold46567135
KIBADrug-target interactionCold-start59,3261,0424,524
LD50 ZhuToxicityScaffold5,1687391,478
LeenayCRISPR repairRandom5,3257601,520
Lipophilicity AstraZenecaPharmacokineticsScaffold2,940420840
OncoPolyPharmacologyDrug synergyCombination16,0142,3314,707
PPBR AZPharmacokineticsScaffold1,952279559
Protein SAbDabAntibody affinityRandom3454999
Solubility AqSolDBPharmacokineticsScaffold6,9889981,996
TAPDevelopabilityRandom845120240
USPTORetrosynthesisRandom775,767110,824221,648
USPTO YieldsReaction yieldsRandom597,54685,364170,728
VDss LombardoPharmacokineticsScaffold791113226
" + }, + { + "type": "page_number", + "bbox": [ + 0.864, + 0.938, + 0.884, + 0.949 + ], + "angle": 0, + "content": "28" + } + ], + [ + { + "type": "table_caption", + "bbox": [ + 0.11, + 0.145, + 0.887, + 0.175 + ], + "angle": 0, + "content": "Table S.4 | Inputs and task descriptions for binary classification tasks. All output responses are either (A) for negative or (B) for positive." + }, + { + "type": "table", + "bbox": [ + 0.087, + 0.18, + 0.912, + 0.849 + ], + "angle": 0, + "content": "
Task NameInputDescription
AMESSmall moleculeGiven a drug SMILES, predict whether it is mutagenic.
BBB MartinsSmall moleculeGiven a drug SMILES, predict whether it can cross the blood-brain barrier.
Bioavailability MaSmall moleculeGiven a drug SMILES, predict whether it is orally available.
CYP1A2 VeithSmall moleculeGiven a drug SMILES, predict whether it inhibits CYP1A2.
CYP2C19 VeithSmall moleculeGiven a drug SMILES, predict whether it inhibits CYP2C19.
CYP2C9 Substrate CarbonMangelsSmall moleculeGiven a drug SMILES, predict whether it is a substrate to CYP2C9.
CYP2C9 VeithSmall moleculeGiven a drug SMILES, predict whether it inhibits CYP2C9.
CYP2D6 Substrate CarbonMangelsSmall moleculeGiven a drug SMILES, predict whether it is a substrate to CYP2D6.
CYP2D6 VeithSmall moleculeGiven a drug SMILES, predict whether it inhibits CYP2D6.
CYP3A4 Substrate CarbonMangelsSmall moleculeGiven a drug SMILES, predict whether it is a substrate to CYP3A4.
CYP3A4 VeithSmall moleculeGiven a drug SMILES, predict whether it inhibits CYP3A4.
Carcinogens LaguninSmall moleculeGiven a drug SMILES, predict whether it is a carcinogen.
ClinToxSmall moleculeGiven a drug SMILES, predict whether it is toxic.
DILISmall moleculeGiven a drug SMILES, predict whether it can cause liver injury.
HIA HouSmall moleculeGiven a drug SMILES, predict whether it is absorbed in the human intestine.
HIV*Small moleculeGiven a drug SMILES, predict whether it has anti-HIV activity.
HuRIProteinGiven the amino acid sequences of two proteins, predict whether the proteins interact.
MHC1 IEDB IMGT NielsenProteinGiven the amino acid of the peptide and pseudo amino acid of MHC 1, predict whether the peptide binds to the MHC.
MHC2 IEDB JensenProteinGiven the amino acid of the peptide and pseudo amino acid of MHC 2, predict whether the peptide binds to the MHC.
PAMPA NCATSSmall moleculeGiven a drug SMILES, predict whether it is permeable in a PAMPA assay.
Pgp BroccatelliSmall moleculeGiven a drug SMILES, predict whether it inhibits Pgp.
SARSCOV2 3CLPro DiamondSmall moleculeGiven a drug SMILES, predict whether it binds SARS-CoV-2 3CL protease.
SARSCOV2 Vitro TouretSmall moleculeGiven a drug SMILES, predict whether it inhibits SARS-CoV-2 replication.
SAbDab ChenProteinGiven an antibody heavy chain and light chain sequence, whether it is developable.
Skin ReactionSmall moleculeGiven a drug SMILES, predict whether it can cause skin reaction.
Tox21Small moleculeGiven a drug SMILES, predict whether it is toxic in various assays.
ToxCastSmall moleculeGiven a drug SMILES, predict whether it is toxic in various assays.
butkiewiczSmall moleculeGiven a drug SMILES, predict whether it is active against various proteins.
hERGSmall moleculeGiven a drug SMILES, predict whether it blocks hERG.
hERG KarimSmall moleculeGiven a drug SMILES, predict whether it inhibits hERG.
herg centralSmall moleculeGiven a drug SMILES, predict whether it inhibits hERG.
miRTarBase phase1Nucleic acid & proteinGiven the miRNA mature and target amino acid, predict whether they interact.
phase2Small molecule & diseaseGiven a drug SMILES and disease, predict whether the phase 1 trial will be approved.
phase3Small molecule & diseaseGiven a drug SMILES and disease, predict whether the phase 2 trial will be approved.
weberSmall molecule & diseaseGiven a drug SMILES and disease, predict whether the phase 3 trial will be approved.
ProteinGiven the amino acid of the epitope and a T-cell receptor (amino acid of the hypervariable CDR3 loop), predict whether the epitope binds to the TCR.
" + }, + { + "type": "table_footnote", + "bbox": [ + 0.112, + 0.852, + 0.458, + 0.864 + ], + "angle": 0, + "content": "* To predict whether compounds have Anti-HIV properties." + }, + { + "type": "page_number", + "bbox": [ + 0.863, + 0.938, + 0.885, + 0.949 + ], + "angle": 0, + "content": "29" + } + ], + [ + { + "type": "table_caption", + "bbox": [ + 0.11, + 0.171, + 0.887, + 0.231 + ], + "angle": 0, + "content": "Table S.5 | Inputs and task descriptions for regression and generation tasks. Regression task outputs are integers between 0 and 1000, which represents a binned transformation of the original numeric label. On evaluation, the integer output is transformed back into the original numeric label space. For the USPTO generation task, the output is the SMILES string of the predicted set of small molecules." + }, + { + "type": "table", + "bbox": [ + 0.078, + 0.237, + 0.922, + 0.84 + ], + "angle": 0, + "content": "
Task NameInputDescription
BindingDB PatentProtein & small moleculeGiven the target amino acid and drug SMILES, predict their binding affinity.
BindingDB ic50ProteinGiven the target amino acid and drug SMILES, predict their IC50.
BindingDB kdProteinGiven the target amino acid and drug SMILES, predict their Kd.
BindingDB kiProteinGiven the target amino acid and drug SMILES, predict their Ki.
Buchwald HartwigSmall moleculeGiven a product, a catalyst, and a reactant SMILES, predict the reaction yield.
Caco2 WangSmall moleculeGiven a drug SMILES, predict the cell effective permeability.
Clearance Hepatocyte AZSmall moleculeGiven a drug SMILES, predict the activity of hepatocyte clearance.
Clearance Microsome AZSmall moleculeGiven a drug SMILES, predict the activity of microsome clearance.
DAVISProtein & small moleculeGiven the target amino acid and drug SMILES, predict their binding affinity.
DisGeNETProtein & diseaseGiven the disease description and the amino acid of the gene, predict their association.
DrugComb BlissSmall molecule & cell lineGiven two drug SMILESs and a cell line description, predict the drug synergy level.
DrugComb CSSSmall molecule & cell lineGiven two drug SMILESs and a cell line description, predict the drug synergy level.
DrugComb HSASmall molecule & cell lineGiven two drug SMILESs and a cell line description, predict the drug synergy level.
DrugComb LoeweSmall molecule & cell lineGiven two drug SMILESs and a cell line description, predict the drug synergy level.
DrugComb ZIPSmall molecule & cell lineGiven two drug SMILESs and a cell line description, predict the drug synergy level.
GDSC1Small molecule & cell lineGiven a drug SMILES and a cell line description, predict the drug sensitivity level.
GDSC2Small molecule & cell lineGiven a drug SMILES and a cell line description, predict the drug sensitivity level.
Half Life ObachSmall moleculeGiven a drug SMILES, predict the half life duration.
KIBAProtein & small moleculeGiven the target amino acid and drug SMILES, predict their binding affinity.
LD50 ZhuSmall moleculeGiven a drug SMILES, predict its LD50 toxicity.
LeenayNucleic acidGiven a GuideSeq sequence, predict various properties.
Lipophilicity AstraZenecaSmall moleculeGiven a drug SMILES, predict the lipophilicity.
OncoPolyPharmacologyCell line & small moleculeGiven two drug SMILESs and a cell line description, predict the drug synergy level.
PPBR AZSmall moleculeGiven a drug SMILES, predict the plasma protein binding rate.
Protein SAbDabProteinGiven the amino acid of the antibody and antigen, predict the binding affinity.
Solubility AqSolDBSmall moleculeGiven a drug SMILES, predict the activity of solubility.
TAPProteinGiven an antibody heavy chain and light chain sequence, predict its CDR length.
USPTOSmall moleculeGiven the product SMILES, generate the reactant SMILESs.
USPTO YieldsSmall moleculeGiven a catalyst SMILES, reactant SMILES, and product SMILES, predict the yield.
VDss LombardoSmall moleculeGiven a drug SMILES, predict the volume of distributon.
" + }, + { + "type": "page_number", + "bbox": [ + 0.864, + 0.938, + 0.884, + 0.949 + ], + "angle": 0, + "content": "30" + } + ], + [ + { + "type": "table_caption", + "bbox": [ + 0.11, + 0.4, + 0.887, + 0.463 + ], + "angle": 0, + "content": "Table S.6 | Types of drugs and targets found in our data. Features found in our data as well as their textual representation and an illustrative example. Protein sequences are divided into several subtypes: some proteins and peptides are represented using their full amino acid sequence whereas MHC molecules are represented using the amino acid pseudo-sequences that only use residues in contact with a peptide, and TCRs only use CDR3 hypervariable loops." + }, + { + "type": "table", + "bbox": [ + 0.118, + 0.467, + 0.88, + 0.59 + ], + "angle": 0, + "content": "
Representation TypeRepresentationExample
Small MoleculesSMILES stringCN1C(=O)CN=C(C2=CCCCC2)c2cc(Cl)ccc21
Amino Acid: Proteins and peptidesAmino acid sequencesQLADETLLKV
Amino Acid: MHC moleculesPseudo-sequences †YFAMYGEKVAHTHVDTLYVRYHYYTWAEWAYTWY
Amino Acid: T cell receptorsCDR3 hypervariable loopsCSASEGTSSYEQYF
Nucleic acidNucleotide sequenceACAGCCCAGCAGUUUAUCACGGG
DiseaseEnglish textChronic myeloproliferative disease
Cell LineEnglish textNU-1, stomach cell sourced from cancer
" + }, + { + "type": "table_footnote", + "bbox": [ + 0.112, + 0.596, + 0.376, + 0.608 + ], + "angle": 0, + "content": "† Only for residues in contact with a peptide." + }, + { + "type": "page_number", + "bbox": [ + 0.863, + 0.938, + 0.883, + 0.949 + ], + "angle": 0, + "content": "31" + } + ], + [ + { + "type": "title", + "bbox": [ + 0.111, + 0.09, + 0.29, + 0.106 + ], + "angle": 0, + "content": "C Method details" + }, + { + "type": "text", + "bbox": [ + 0.109, + 0.12, + 0.889, + 0.304 + ], + "angle": 0, + "content": "This section elaborates on the modeling choices employed in the development of TxGemma. Tables S.7 and S.8 illustrate prompts used for binary classification, regression, and generation tasks, showcasing the input structure for the model including the instructions and context provided to the model. Table S.9 provide a concrete example of few-shot prompting applied to a binary classification task using 10 examples with nearest-neighbor shots. Each dataset in our data is structured as a text prompt, consisting of instructions, context, a question, and the corresponding answer. To provide relevant background, we created 2-3 sentence contexts based on TDC dataset descriptions and literature searches. Prompts used for predicting adverse events in clinical trials based on the TrialBench dataset [1] are shown in Table S.10. To illustrate the reasoning process of Agentic-Tx, Table S.11 provides an example of the steps taken to answer a chemical preference question from ChemBench. Table S.12 also provides a comprehensive list of the tools available of Agentic-Tx. Section C.1 provides details of the Wilcoxon signed-rank test used to assess the performance of our models across all tasks." + }, + { + "type": "text", + "bbox": [ + 0.109, + 0.309, + 0.888, + 0.37 + ], + "angle": 0, + "content": "We utilize random data points from the training set for few-shot learning during training. 
Although we use nearest neighbor shots for evaluation, we opt for random shots during training due to the higher intra-set similarity observed within the training data compared to between training and test sets, as illustrated in Figure S.2." + }, + { + "type": "image", + "bbox": [ + 0.312, + 0.387, + 0.688, + 0.571 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.109, + 0.585, + 0.892, + 0.631 + ], + "angle": 0, + "content": "Figure S.2 | Distribution of the Tanimoto similarities for the 10 nearest neighbors in the AMES task. Nearest neighbors are calculated from the training set for training and validation sets, and from both the training and validation sets for the test set." + }, + { + "type": "title", + "bbox": [ + 0.111, + 0.661, + 0.441, + 0.679 + ], + "angle": 0, + "content": "C.1 Aggregated method comparison" + }, + { + "type": "text", + "bbox": [ + 0.109, + 0.688, + 0.888, + 0.72 + ], + "angle": 0, + "content": "For a pair of performances \\((x_{i},y_{i})\\) of a task \\(i\\), the test statistic of the Wilcoxon signed-rank test is calculated as the minimum of the positive-rank sum \\((W^{+})\\) and the negative-rank sum \\((W^{-})\\)," + }, + { + "type": "equation", + "bbox": [ + 0.443, + 0.735, + 0.887, + 0.768 + ], + "angle": 0, + "content": "\\[\nW ^ {+} = \\sum_ {X _ {i} > 0} R _ {i} \\tag {1}\n\\]" + }, + { + "type": "equation", + "bbox": [ + 0.443, + 0.783, + 0.887, + 0.817 + ], + "angle": 0, + "content": "\\[\nW ^ {-} = \\sum_ {X _ {i} < 0} R _ {i} \\tag {2}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.109, + 0.829, + 0.888, + 0.89 + ], + "angle": 0, + "content": "where \\( X_{i} = x_{i} - y_{i} \\) and \\( R_{i} \\) is the rank of \\( |x_{i} - y_{i}| \\). In order to account for the differences in magnitudes for MAE and MSE metrics, we normalized all performances by the mean of the performances from both models. 
We also reversed the sign of MAEs and MSEs because lower MAEs and MSEs correspond to better performances." + }, + { + "type": "page_number", + "bbox": [ + 0.863, + 0.938, + 0.886, + 0.95 + ], + "angle": 0, + "content": "32" + } + ], + [ + { + "type": "table_caption", + "bbox": [ + 0.112, + 0.151, + 0.529, + 0.165 + ], + "angle": 0, + "content": "Table S.7 | Example of prompts for binary classification tasks." + }, + { + "type": "text", + "bbox": [ + 0.125, + 0.187, + 0.546, + 0.2 + ], + "angle": 0, + "content": "Instructions: Answer the following question about drug properties." + }, + { + "type": "text", + "bbox": [ + 0.124, + 0.2, + 0.874, + 0.239 + ], + "angle": 0, + "content": "Context: As a membrane separating circulating blood and brain extracellular fluid, the blood-brain barrier (BBB) is the protection layer that blocks most foreign drugs. Thus the ability of a drug to penetrate the barrier to deliver to the site of action forms a crucial challenge in development of drugs for central nervous system." + }, + { + "type": "text", + "bbox": [ + 0.125, + 0.24, + 0.495, + 0.252 + ], + "angle": 0, + "content": "Question: Given a drug SMILES string, predict whether it" + }, + { + "type": "text", + "bbox": [ + 0.126, + 0.253, + 0.427, + 0.265 + ], + "angle": 0, + "content": "(A) does not cross the BBB (B) crosses the BBB" + }, + { + "type": "text", + "bbox": [ + 0.126, + 0.266, + 0.513, + 0.279 + ], + "angle": 0, + "content": "Drug SMILES: \\(\\mathrm{CN1C(=O)CN = C(C2 = CCCCC2)c2cc(Cl)ccc21}\\)" + }, + { + "type": "text", + "bbox": [ + 0.126, + 0.28, + 0.216, + 0.292 + ], + "angle": 0, + "content": "Answer: (B)" + }, + { + "type": "text", + "bbox": [ + 0.125, + 0.315, + 0.586, + 0.327 + ], + "angle": 0, + "content": "Instructions: Answer the following question about peptide-MHC binding." 
+ }, + { + "type": "text", + "bbox": [ + 0.124, + 0.328, + 0.872, + 0.405 + ], + "angle": 0, + "content": "Context: In the human body, T cells monitor the existing peptides and trigger an immune response if the peptide is foreign. To decide whether or not if the peptide is not foreign, the peptide must bind to a major histocompatibility complex (MHC) molecule. Therefore, predicting peptide-MHC binding affinity is pivotal for determining immunogenicity. In some experiments, the peptide binding is measured against cells that express multiple MHCs, so the peptide could be binding any one of the possible MHCs. Class 1 MHC molecules bind to peptides that are usually 8-14 amino acids long and activate CD8 T cells." + }, + { + "type": "text", + "bbox": [ + 0.124, + 0.407, + 0.871, + 0.432 + ], + "angle": 0, + "content": "Question: Given the amino acid sequence of the peptide and possible pseudo amino acid sequences of MHC 1, predict whether the peptide" + }, + { + "type": "text", + "bbox": [ + 0.125, + 0.433, + 0.551, + 0.446 + ], + "angle": 0, + "content": "(A) does not bind to any of the MHCs (B) binds to any of the MHCs" + }, + { + "type": "text", + "bbox": [ + 0.126, + 0.447, + 0.412, + 0.459 + ], + "angle": 0, + "content": "Peptide amino acid sequence: QLADETLLKV" + }, + { + "type": "text", + "bbox": [ + 0.126, + 0.459, + 0.681, + 0.472 + ], + "angle": 0, + "content": "Possible MHC pseudosequences: YFAMYGEKAVTHVDTLYVRYHYTTYEAWAYTWY" + }, + { + "type": "text", + "bbox": [ + 0.126, + 0.473, + 0.216, + 0.485 + ], + "angle": 0, + "content": "Answer: (B)" + }, + { + "type": "text", + "bbox": [ + 0.125, + 0.508, + 0.622, + 0.52 + ], + "angle": 0, + "content": "Instructions: Answer the following question about miRNA protein interactions." 
+ }, + { + "type": "text", + "bbox": [ + 0.124, + 0.521, + 0.872, + 0.56 + ], + "angle": 0, + "content": "Context: MicroRNAs (miRNAs) are, small non-coding RNAs with 18-25 nucleotides, which are central regulators at the post-transcriptional level in both animals and plants. Perfect or near-perfect complementary binding of miRNAs and their target mRNA negatively regulates gene expression by accelerating mRNA degradation or suppressing mRNA translation." + }, + { + "type": "text", + "bbox": [ + 0.125, + 0.561, + 0.717, + 0.573 + ], + "angle": 0, + "content": "Question: Given the miRNA mature sequence and target amino acid sequence, predict whether" + }, + { + "type": "text", + "bbox": [ + 0.125, + 0.574, + 0.605, + 0.586 + ], + "angle": 0, + "content": "(A) the miRNA and target do not interact (B) the miRNA and target interact" + }, + { + "type": "text", + "bbox": [ + 0.126, + 0.587, + 0.462, + 0.599 + ], + "angle": 0, + "content": "miRNA sequence: UUCCUGUCAGCCGUGGGUGCC" + }, + { + "type": "text", + "bbox": [ + 0.124, + 0.6, + 0.872, + 0.638 + ], + "angle": 0, + "content": "Target amino acid sequence: MSVNMDELRHQVMINQFVLAAGCAADQAKQLLQAAHWQFETALSTFFQET-NIPNSHHHHQMMCTPSNTPATPPNFPDALAMFSKLRASEGLQSSNSPMTAAACSPANFSPFWASSPPSHQAPWIP-PSSPTTFHLHRPQPTWPPGAQQGGAQQKAMAAMDGQR" + }, + { + "type": "text", + "bbox": [ + 0.126, + 0.64, + 0.216, + 0.652 + ], + "angle": 0, + "content": "Answer: (A)" + }, + { + "type": "text", + "bbox": [ + 0.125, + 0.675, + 0.53, + 0.687 + ], + "angle": 0, + "content": "Instructions: Answer the following question about clinical trials." + }, + { + "type": "text", + "bbox": [ + 0.124, + 0.688, + 0.872, + 0.766 + ], + "angle": 0, + "content": "Context: Clinical trial is the most time and cost-consuming step in the drug discovery process. Phase 1 clinical trials test the safety and basic properties of a new drug or treatment in a small group of people for the first time. 
Optimizing and designing trials with machine learning could drastically lead to the speedup of delivery of life-saving therapeutics to patients. Clinical trial outcome prediction is a machine learning task that aims to forecast the outcome of clinical trials, such as the approval rate of a drug or treatment. It utilizes various clinical trial features, including the drug's molecular structure and patient disease." + }, + { + "type": "text", + "bbox": [ + 0.125, + 0.767, + 0.617, + 0.78 + ], + "angle": 0, + "content": "Question: Given a drug SMILES string and disease, predict if the phase 1 trial" + }, + { + "type": "text", + "bbox": [ + 0.126, + 0.781, + 0.437, + 0.793 + ], + "angle": 0, + "content": "(A) would not be approved (B) would be approved" + }, + { + "type": "text", + "bbox": [ + 0.126, + 0.794, + 0.704, + 0.807 + ], + "angle": 0, + "content": "Drug SMILES: COC1=NC(N)=NC2=C1N=CN2[C@@H]1O[C@H](CO)[C@@H](O)[C@@H]1O" + }, + { + "type": "text", + "bbox": [ + 0.126, + 0.808, + 0.391, + 0.819 + ], + "angle": 0, + "content": "Disease: Chronic myeloproliferative disease" + }, + { + "type": "text", + "bbox": [ + 0.126, + 0.82, + 0.216, + 0.833 + ], + "angle": 0, + "content": "Answer: (A)" + }, + { + "type": "page_number", + "bbox": [ + 0.863, + 0.938, + 0.885, + 0.949 + ], + "angle": 0, + "content": "33" + } + ], + [ + { + "type": "table_caption", + "bbox": [ + 0.112, + 0.178, + 0.566, + 0.192 + ], + "angle": 0, + "content": "Table S.8 | Example of prompts for regression and generation tasks." + }, + { + "type": "text", + "bbox": [ + 0.125, + 0.214, + 0.546, + 0.226 + ], + "angle": 0, + "content": "Instructions: Answer the following question about drug properties." + }, + { + "type": "text", + "bbox": [ + 0.124, + 0.227, + 0.871, + 0.265 + ], + "angle": 0, + "content": "Context: The human colon epithelial cancer cell line, Caco-2, is used as an in vitro model to simulate the human intestinal tissue. 
The experimental result on the rate of drug passing through the Caco-2 cells can approximate the rate at which the drug permeates through the human intestinal tissue." + }, + { + "type": "text", + "bbox": [ + 0.124, + 0.267, + 0.872, + 0.292 + ], + "angle": 0, + "content": "Question: Given a drug SMILES string, predict its normalized Caco-2 cell effective permeability from 000 to 1000, where 000 is minimum permeability and 1000 is maximum permeability." + }, + { + "type": "text", + "bbox": [ + 0.126, + 0.293, + 0.501, + 0.305 + ], + "angle": 0, + "content": "Drug SMILES: \\( \\mathrm{O} = \\mathrm{C}(\\mathrm{O})\\mathrm{{COC}}\\left( { = \\mathrm{O}}\\right) \\mathrm{{Cc}}1\\text{ccc}\\mathrm{{cc}}1\\mathrm{{Nc}}1\\mathrm{{c}}\\left( \\mathrm{{Cl}}\\right) \\mathrm{{ccc}}1\\mathrm{{Cl}} \\)" + }, + { + "type": "text", + "bbox": [ + 0.126, + 0.306, + 0.217, + 0.317 + ], + "angle": 0, + "content": "Answer: 788" + }, + { + "type": "text", + "bbox": [ + 0.125, + 0.341, + 0.542, + 0.354 + ], + "angle": 0, + "content": "Instructions: Answer the following question about drug responses." + }, + { + "type": "text", + "bbox": [ + 0.124, + 0.355, + 0.872, + 0.393 + ], + "angle": 0, + "content": "Context: The same drug compound could have various levels of responses in different patients. To design drug for individual or a group with certain characteristics is the central goal of precision medicine. In experiments, IC50s of drugs were measured against cancer cell lines." + }, + { + "type": "text", + "bbox": [ + 0.124, + 0.394, + 0.873, + 0.419 + ], + "angle": 0, + "content": "Question: Given a drug SMILES string and a cell line description, predict the normalized drug sensitivity from 000 to 1000, where 000 is minimum drug sensitivity and 1000 is maximum drug sensitivity." 
+ }, + { + "type": "text", + "bbox": [ + 0.125, + 0.42, + 0.631, + 0.433 + ], + "angle": 0, + "content": "Drug SMILES: \\(\\mathrm{CN1C = C(C2 = CC = CC = C21) / C = C\\backslash 3 / C4 = C(C = CC = N4)NC3 = O}\\)" + }, + { + "type": "text", + "bbox": [ + 0.125, + 0.434, + 0.511, + 0.446 + ], + "angle": 0, + "content": "Cell line description: SNU-1, stomach cell sourced from cancer" + }, + { + "type": "text", + "bbox": [ + 0.126, + 0.447, + 0.217, + 0.458 + ], + "angle": 0, + "content": "Answer: 615" + }, + { + "type": "text", + "bbox": [ + 0.125, + 0.482, + 0.596, + 0.494 + ], + "angle": 0, + "content": "Instructions: Answer the following question about drug target interactions." + }, + { + "type": "text", + "bbox": [ + 0.124, + 0.496, + 0.872, + 0.56 + ], + "angle": 0, + "content": "Context: Drug-target binding is the physical interaction between a drug and a specific biological molecule, such as a protein or enzyme. This interaction is essential for the drug to exert its pharmacological effect. The strength of the drug-target binding is determined by the binding affinity, which is a measure of how tightly the drug binds to the target. Kd is the dissociation constant of a drug-target complex. It is the concentration of drug at which half of the drug-target complexes have dissociated. A lower Kd value indicates a stronger binding affinity." + }, + { + "type": "text", + "bbox": [ + 0.124, + 0.561, + 0.872, + 0.585 + ], + "angle": 0, + "content": "Question: Given the target amino acid sequence and compound SMILES string, predict their normalized binding affinity Kd from 000 to 1000, where 000 is minimum Kd and 1000 is maximum Kd." 
+ }, + { + "type": "text", + "bbox": [ + 0.125, + 0.587, + 0.47, + 0.599 + ], + "angle": 0, + "content": "Drug SMILES: \\(\\mathrm{O = S(=O)(O)c1cccc2ccc(Nc3cccccc)3c12}\\)" + }, + { + "type": "text", + "bbox": [ + 0.124, + 0.6, + 0.873, + 0.638 + ], + "angle": 0, + "content": "Target amino acid sequence: MATVQQLEGRWRLVDSKGFDEYMKELGVIALRKMGAMKPDCIITCDGKNLTIKTESTLKITTQFSCTLGEKFETTADGRKTQTVCNFTDGALVHQWEWDGKESTITRKLKDGLVVECVMNNVTCTRIYEKVE" + }, + { + "type": "text", + "bbox": [ + 0.126, + 0.64, + 0.217, + 0.651 + ], + "angle": 0, + "content": "Answer: 397" + }, + { + "type": "text", + "bbox": [ + 0.125, + 0.675, + 0.507, + 0.687 + ], + "angle": 0, + "content": "Instructions: Answer the following question about reactions." + }, + { + "type": "text", + "bbox": [ + 0.124, + 0.688, + 0.872, + 0.74 + ], + "angle": 0, + "content": "Context: Retrosynthesis is the process of finding a set of reactants that can synthesize a target molecule, i.e., product, which is a fundamental task in drug manufacturing. The target is recursively transformed into simpler precursor molecules until commercially available \"starting\" molecules are identified. In a data sample, there is only one product molecule, reactants can be one or multiple molecules." + }, + { + "type": "text", + "bbox": [ + 0.125, + 0.741, + 0.623, + 0.754 + ], + "angle": 0, + "content": "Question: Given a product SMILES string, predict the reactant SMILES string." 
+ }, + { + "type": "text", + "bbox": [ + 0.125, + 0.754, + 0.805, + 0.767 + ], + "angle": 0, + "content": "Product SMILES: [CH2:12]1[C:7]2([CH2:6][CH2:5][O:15][CH2:1][CH2:8]2)[CH2:13][CH2:14][O:10][C:11]1=[O:17]" + }, + { + "type": "text", + "bbox": [ + 0.125, + 0.767, + 0.873, + 0.793 + ], + "angle": 0, + "content": "Answer: [CH:1]12B[CH:5]([CH2:6][CH2:7][CH2:8]1)CCC2.[O:10]1[CH2:14][CH2:13][CH2:12] [CH2:11]1.[OH:15].[Na+].[OH:17]O.CI" + }, + { + "type": "page_number", + "bbox": [ + 0.863, + 0.938, + 0.885, + 0.949 + ], + "angle": 0, + "content": "34" + } + ], + [ + { + "type": "table_caption", + "bbox": [ + 0.11, + 0.213, + 0.887, + 0.245 + ], + "angle": 0, + "content": "Table S.9 | Example of a 10-shot prompt for a binary classification task. Shots are selected from nearest neighbors in the combined training and validation set (not the test set)." + }, + { + "type": "text", + "bbox": [ + 0.126, + 0.265, + 0.546, + 0.278 + ], + "angle": 0, + "content": "Instructions: Answer the following question about drug properties." + }, + { + "type": "text", + "bbox": [ + 0.125, + 0.278, + 0.874, + 0.318 + ], + "angle": 0, + "content": "Context: As a membrane separating circulating blood and brain extracellular fluid, the blood-brain barrier (BBB) is the protection layer that blocks most foreign drugs. Thus the ability of a drug to penetrate the barrier to deliver to the site of action forms a crucial challenge in development of drugs for central nervous system." 
+ }, + { + "type": "text", + "bbox": [ + 0.126, + 0.33, + 0.796, + 0.345 + ], + "angle": 0, + "content": "Question: Given a drug SMILES string, predict whether it (A) does not cross the BBB (B) crosses the BBB" + }, + { + "type": "text", + "bbox": [ + 0.126, + 0.352, + 0.479, + 0.366 + ], + "angle": 0, + "content": "Drug SMILES: \\(\\mathrm{CN1C(=O)CN = C(c2cccccc)2c2cc(Cl)ccc21}\\)" + }, + { + "type": "text", + "bbox": [ + 0.127, + 0.367, + 0.205, + 0.38 + ], + "angle": 0, + "content": "Answer: (B)" + }, + { + "type": "text", + "bbox": [ + 0.126, + 0.392, + 0.487, + 0.406 + ], + "angle": 0, + "content": "Drug SMILES: \\(\\mathrm{CN1C(=O)CN = C(c2cccccc2F)c2cc(Cl)ccc21}\\)" + }, + { + "type": "text", + "bbox": [ + 0.127, + 0.407, + 0.205, + 0.419 + ], + "angle": 0, + "content": "Answer: (B)" + }, + { + "type": "text", + "bbox": [ + 0.126, + 0.431, + 0.475, + 0.445 + ], + "angle": 0, + "content": "Drug SMILES: \\(\\mathrm{CN1C(=S)CN = C(c2cccccc)2c2cc(Cl)ccc21}\\)" + }, + { + "type": "text", + "bbox": [ + 0.127, + 0.446, + 0.205, + 0.458 + ], + "angle": 0, + "content": "Answer: (B)" + }, + { + "type": "text", + "bbox": [ + 0.126, + 0.47, + 0.551, + 0.485 + ], + "angle": 0, + "content": "Drug SMILES: CP(C)(=O)CN1C(=O)CN=C(c2cccccc2)c2cc(Cl)ccc21" + }, + { + "type": "text", + "bbox": [ + 0.127, + 0.486, + 0.205, + 0.498 + ], + "angle": 0, + "content": "Answer: (B)" + }, + { + "type": "text", + "bbox": [ + 0.126, + 0.51, + 0.548, + 0.525 + ], + "angle": 0, + "content": "Drug SMILES: \\(\\mathrm{CN1C(=O)CN = C(c2cccccc)2c2cc([N + ](=O)[O - ])ccc21}\\)" + }, + { + "type": "text", + "bbox": [ + 0.127, + 0.525, + 0.205, + 0.537 + ], + "angle": 0, + "content": "Answer: (B)" + }, + { + "type": "text", + "bbox": [ + 0.126, + 0.55, + 0.558, + 0.564 + ], + "angle": 0, + "content": "Drug SMILES: CCN(CC)CCN1C(=O)CN=C(c2cccccc2F)c2cc(Cl)ccc21" + }, + { + "type": "text", + "bbox": [ + 0.127, + 0.565, + 0.205, + 0.577 + ], + "angle": 0, + "content": "Answer: (B)" + }, + { + "type": 
"text", + "bbox": [ + 0.126, + 0.589, + 0.511, + 0.602 + ], + "angle": 0, + "content": "Drug SMILES: \\( \\mathrm{O} = \\mathrm{C}1\\mathrm{{CN}} = \\mathrm{C}\\left( {c2\\text{ccc}cc2}\\right) c2\\mathrm{{cc}}\\left( \\mathrm{{Cl}}\\right) \\mathrm{{ccc}}2\\mathrm{\\;N}1\\mathrm{{CC}}1\\mathrm{{CC}}1 \\)" + }, + { + "type": "text", + "bbox": [ + 0.127, + 0.603, + 0.205, + 0.616 + ], + "angle": 0, + "content": "Answer: (B)" + }, + { + "type": "text", + "bbox": [ + 0.126, + 0.628, + 0.51, + 0.642 + ], + "angle": 0, + "content": "Drug SMILES: C#CCN1C(=O)CN=C(c2cccc2)c2cc(Cl)ccc21" + }, + { + "type": "text", + "bbox": [ + 0.127, + 0.643, + 0.205, + 0.655 + ], + "angle": 0, + "content": "Answer: (B)" + }, + { + "type": "text", + "bbox": [ + 0.126, + 0.668, + 0.527, + 0.682 + ], + "angle": 0, + "content": "Drug SMILES: \\( \\mathrm{O} = \\mathrm{C}1\\mathrm{{CN}} = \\mathrm{C}\\left( {\\mathrm{c}2\\text{ccc} : 2}\\right) \\mathrm{c}2\\mathrm{{cc}}\\left( \\mathrm{{Cl}}\\right) \\mathrm{{ccc}}2\\mathrm{\\;N}1\\mathrm{{CC}}\\left( \\mathrm{F}\\right) \\left( \\mathrm{F}\\right) \\mathrm{F} \\)" + }, + { + "type": "text", + "bbox": [ + 0.127, + 0.683, + 0.205, + 0.695 + ], + "angle": 0, + "content": "Answer: (B)" + }, + { + "type": "text", + "bbox": [ + 0.126, + 0.708, + 0.589, + 0.722 + ], + "angle": 0, + "content": "Drug SMILES: \\(\\mathrm{CCS}(\\mathrm{=O})(\\mathrm{=O})\\mathrm{CCN1C}(\\mathrm{=O})\\mathrm{CN} = \\mathrm{C}(\\mathrm{c2cccccc2F})\\mathrm{c2cc(Cl)ccc21}\\)" + }, + { + "type": "text", + "bbox": [ + 0.127, + 0.722, + 0.205, + 0.734 + ], + "angle": 0, + "content": "Answer: (B)" + }, + { + "type": "text", + "bbox": [ + 0.126, + 0.743, + 0.512, + 0.757 + ], + "angle": 0, + "content": "Drug SMILES: \\(\\mathrm{CN1C(=O)CN = C(C2 = CCCCCC2)c2cc(Cl)ccc21}\\)" + }, + { + "type": "text", + "bbox": [ + 0.127, + 0.758, + 0.216, + 0.77 + ], + "angle": 0, + "content": "Answer: (B)" + }, + { + "type": "page_number", + "bbox": [ + 0.864, + 0.938, + 0.885, + 0.949 + ], + 
"angle": 0, + "content": "35" + } + ], + [ + { + "type": "table_caption", + "bbox": [ + 0.11, + 0.307, + 0.887, + 0.337 + ], + "angle": 0, + "content": "Table S.10 | Example of prompts for predicting adverse events in clinical trials. The top prompt only provides drug SMILES strings while the bottom prompt also includes textual information about the clinical trial." + }, + { + "type": "text", + "bbox": [ + 0.126, + 0.348, + 0.746, + 0.362 + ], + "angle": 0, + "content": "From the following information about a clinical trial, predict whether it would have an adverse event." + }, + { + "type": "code", + "bbox": [ + 0.126, + 0.374, + 0.703, + 0.402 + ], + "angle": 0, + "content": "Drug: CC[C@H]1[C@@H](COC2=C3C=C(OC)C(=CC3=CC=N2)C(N)=O)NC(=O)[C@H]1F.[H][C@@]12CC[C@H](O)[C@@]1(C)CC[C@]1([H])C3=C(CC[C@@]21[H])C=C(O)C=C3" + }, + { + "type": "text", + "bbox": [ + 0.126, + 0.415, + 0.211, + 0.427 + ], + "angle": 0, + "content": "Answer: No" + }, + { + "type": "text", + "bbox": [ + 0.125, + 0.467, + 0.746, + 0.48 + ], + "angle": 0, + "content": "From the following information about a clinical trial, predict whether it would have an adverse event." + }, + { + "type": "text", + "bbox": [ + 0.125, + 0.493, + 0.874, + 0.533 + ], + "angle": 0, + "content": "Title: A Study To Estimate The Effect of PF-06650833 On The Pharmacokinetics (PK) of Oral Contraceptive (OC) Summary: This is a Phase 1, open label, fixed sequence study of the effect of multiple dose PF-06650833 on single dose OC PK in healthy female subjects." 
+ }, + { + "type": "text", + "bbox": [ + 0.126, + 0.534, + 0.184, + 0.544 + ], + "angle": 0, + "content": "Phase: 1" + }, + { + "type": "text", + "bbox": [ + 0.127, + 0.546, + 0.233, + 0.558 + ], + "angle": 0, + "content": "Disease: Healthy" + }, + { + "type": "text", + "bbox": [ + 0.127, + 0.56, + 0.276, + 0.571 + ], + "angle": 0, + "content": "Minimum age: 18 Years" + }, + { + "type": "text", + "bbox": [ + 0.127, + 0.573, + 0.277, + 0.585 + ], + "angle": 0, + "content": "Maximum age: 60 Years" + }, + { + "type": "text", + "bbox": [ + 0.127, + 0.586, + 0.421, + 0.598 + ], + "angle": 0, + "content": "Healthy volunteers: Accepts Healthy Volunteers" + }, + { + "type": "text", + "bbox": [ + 0.127, + 0.599, + 0.872, + 0.624 + ], + "angle": 0, + "content": "Interventions: \\(400\\mathrm{mg}\\) by mouth (PO) Once daily (QD) for 11 days; Single dose of Oral tablet containing 30 ug EE and 150 ug of LN" + }, + { + "type": "code", + "bbox": [ + 0.127, + 0.625, + 0.701, + 0.652 + ], + "angle": 0, + "content": "Drug: CC[C@H]1[C@@H](COC2=C3C=C(OC)C(=CC3=CC=N2)C(N)=O)NC(=O)[C@H]1F.[H][C@@]12CC[C@H](O)[C@@]1(C)CC[C@]1([H])C3=C(CC[C@@]21[H])C=C(O)C=C3" + }, + { + "type": "text", + "bbox": [ + 0.126, + 0.664, + 0.211, + 0.676 + ], + "angle": 0, + "content": "Answer: No" + }, + { + "type": "page_number", + "bbox": [ + 0.864, + 0.938, + 0.885, + 0.949 + ], + "angle": 0, + "content": "36" + } + ], + [ + { + "type": "table_caption", + "bbox": [ + 0.112, + 0.111, + 0.784, + 0.125 + ], + "angle": 0, + "content": "Table S.11 | Example of Agentic-Tx ChemBench chemical preference question answering." + }, + { + "type": "table", + "bbox": [ + 0.113, + 0.133, + 0.898, + 0.898 + ], + "angle": 0, + "content": "
Example of Chembench Chemical Preference
QuestionConsider the following two drug candidates: \nA. N#Cclc(NC(=O)c2cc([N+](=O)[O-])ccc2N2CCOCC2)sc2c1CCCC2 \nB. O=C(C=Cclc00001)c1cccc1. \nWhich is more preferable for further development?
Thought 1First, I need to obtain more information about each drug candidate. I will use the SMILES to Description tool to get detailed descriptions of each molecule from their SMILES strings, starting with candidate.
Action 1SMILES to Description \nInput SMILES: N#Cclc(NC(=O)c2cc([N+](=O)[O-])ccc2N2CCOCC2)sc2c1CCCC2
Observation 1PubChem CID: 3934361 \nMolecular Formula: C20H20N4O4S \nMolecular Weight: 412.5 \nCharge: 0 \nIUPAC Name: N-(3-cyano-4,5,6,7-tetrahydro-1-benzothiophen-2-yl)-2-morpholin-4-yl-5-nitrobenzamide XLogP: 3.8 \nSynonyms: MLS000335194, CHEMBL1549645, HMS2597A10, HMS3379H10, AKOS001044982, SMR000249952, SR-01000056848, SR-01000056848-1, Z27367728 \n(Summarized) The provided information describes a single molecule (PubChem CID 3934361) with a cyano-tetrahydro-benzothiophene core, a morpholino-nitrobenzamide substituent, and an XLogP of 3.8, indicating its lipophilicity.
Thought 2Now, I will use the SMILES to Description tool to get detailed descriptions of candidate B from its SMILES string. After that, I will compare the information obtained for both candidates to determine which is more preferable for further development.
Action 2SMILES to Description \nInput SMILES: O=C(C=Cclc00001)c1cccc1
Observation 2PubChem CID: 219207 \nMolecular Formula: C14H11NO \nMolecular Weight: 209.24 \nCharge: 0 \nIUPAC Name: 1-phenyl-3-pyridin-2-ylprop-2-en-1-one XLogP: 2.7 \nSynonyms: 3-(2-PYRIDYL)-ACRYLOPHENONE, MLS002637493, azachalcone, CHEMBL1717486, DTXSID601279307, HMS3079I05, SMR001547031, 1-Phenyl-3-(2-pyridinyl)-2-propen-1-one \n(Summary) The molecule (B) represented by the SMILES O=C(C=Cclc00001)c1cccc1, also known as 1-phenyl-3-pyridin-2-ylprop-2-en-1-one, has a molecular weight of 209.24 g/mol and a calculated XLogP value of 2.7.
Observation 2
Thought 3Based on the descriptions, candidate B (XLogP = 2.7) is less lipophilic than candidate A (XLogP = 3.8). Lower lipophilicity can often be associated with better absorption and distribution properties. I will now use the ClinicalTox tool to assess the clinical toxicity of candidate B.
Action 3ClinicalTox \nInput SMILES: O=C(C=Cclc00001)c1cccc1
Observation 3Context: Humans are exposed to a variety of chemicals through food, household products, and medicines, some of which can be toxic, leading to over 30% of promising pharmaceuticals failing in human trials due to toxicity. Toxic drugs can be identified from clinical trials that failed due to toxicity, while non-toxic drugs can be identified from FDA approval status or from clinical trials that report no toxicity. Prediction returned: O=C(C=Cclc00001)c1cccc1 is not toxic! \n(Summary) Based on the provided information, drug candidate B (O=C(C=Cclc00001)c1cccc1) is predicted to be non-toxic, suggesting it might be more preferable for further development.
Observation 3
Final ResponseCandidate B is more preferable for further development. Here's why: \n-Lipophilicity (XLogP): Candidate B has a lower XLogP value (2.7) compared to Candidate A (3.8). Lower lipophilicity is generally favored in drug development. Excessive lipophilicity (high XLogP) can lead to problems like poor solubility, increased risk of off-target binding and toxicity, and difficulties in achieving good absorption and distribution in the body. A value closer to the range of many oral drugs is preferred. \n-Toxicity: The clinical toxicity tool states that candidate B is predicted to be non-toxic.
" + }, + { + "type": "page_number", + "bbox": [ + 0.864, + 0.938, + 0.884, + 0.949 + ], + "angle": 0, + "content": "37" + } + ], + [ + { + "type": "table_caption", + "bbox": [ + 0.112, + 0.184, + 0.516, + 0.199 + ], + "angle": 0, + "content": "Table S.12 | Descriptions of tools used by Agentic-Tx." + }, + { + "type": "table", + "bbox": [ + 0.121, + 0.204, + 0.878, + 0.825 + ], + "angle": 0, + "content": "
Tool NameDescription
ToxCastUses TxGemma to predict the toxicity of a given drug (SMILES string) in various ToxCast assays based on the provided context. Returns results indicating whether the drug is toxic or not in each selected assay.
ClinicalToxUses TxGemma to predict the clinical toxicity of a given drug (SMILES string) for humans. Returns a result indicating whether the drug is predicted to be toxic or not.
ChatAllows conversational interaction with TxGemma-Chat. Enables posing therapeutics-related questions and receiving responses.
MutagenicityUses TxGemma to predict whether a given drug (SMILES) is mutagenic based on the Ames test. Returns a result indicating if the drug is mutagenic or not.
IC50Uses TxGemma to predict the normalized IC50 between a drug (SMILES) and a target protein (amino acid sequence). Returns a IC50 value, with lower values suggesting potent inhibition.
Phase 1 TrialUses TxGemma to predict the approval outcome of a Phase 1 clinical trial for a drug (SMILES) against a specified disease. Returns a result indicating whether the trial would be approved or not.
Wikipedia SearchSearches Wikipedia for a given text query. Returns the top matching article's title, link, and a short summary.
PubMed SearchQueries PubMed for scientific articles based on a search text. Returns metadata (PMID, title, authors, journal, date, abstract) for the top few articles.
Web SearchPerforms a general web search. Returns titles, links, and snippets for the top search results.
HTML FetchFetched the raw HTML content of a given URL. Useful for inspecting webpage details.
SMILES to DescriptionRetrieves molecular information from PubChem for a given SMILES string. Returns properties like PubChem CID, molecular formula, IUPAC name, XLogP, and synonyms.
SMILES TherapyRetrieves therapeutic information (ChEMBL ID, mechanisms of action, drug indications, ATC classifications) for a drug given its SMILES string.
Molecule ToolProvides molecule-related functions: searching for compounds by name (returns properties and IDs) and converting between molecular representations (InChI, SMILES, InChIKey, Mol).
Molecule ConvertConverts a molecules representation from one type to another (e.g., SMILES to InChI).
Gene SequenceRetrieves amino acid sequences for a given gene name and organism. Searches NCBI Nucleotide, fetches records, and translates DNA to protein sequences.
Gene DescriptionRetrieves descriptive information about a gene from NCBI Gene, including official symbol, full name, description, and summary.
BlastPRuns a BLASTP search against NCBI databases for a given amino acid sequence. Returns hits with gene names, organisms, and accessions.
Protein DescriptionProvides descriptive information (organism, definition, accession) for a protein, either by name or amino acid sequence. Uses NCBI Protein database or BLASTP.
" + }, + { + "type": "page_number", + "bbox": [ + 0.864, + 0.938, + 0.884, + 0.949 + ], + "angle": 0, + "content": "38" + } + ], + [ + { + "type": "title", + "bbox": [ + 0.111, + 0.091, + 0.318, + 0.106 + ], + "angle": 0, + "content": "D Additional results" + }, + { + "type": "title", + "bbox": [ + 0.111, + 0.123, + 0.437, + 0.14 + ], + "angle": 0, + "content": "D.1 TxGemma-Predict performance" + }, + { + "type": "text", + "bbox": [ + 0.11, + 0.149, + 0.889, + 0.41 + ], + "angle": 0, + "content": "Figure S.4 compares TxGemma-27B-Predict with previous SOTA models, taking into account that Tx-LLM M achieved SOTA performance on many tasks. We provide detailed results tables for binary classification tasks in Table S.13 (comparing against specialist SOTA and base models) and Table S.15 (comparing against TxGemma-Chat and Tx-LLM), and for regression and generation tasks in Table S.14 (comparing against specialist SOTA and base models) and Table S.16 (comparing against TxGemma-Chat and Tx-LLM). Tables S.17 and S.18 list the performances of released TxGemma models trained only on datasets with commercial licenses. Figures S.5 and S.6 compares TxGemma-27B-Predict with LlaSMol and MolE, models specialized for small molecules, on small molecule tasks. Figure S.12 plots the percentage of tasks that contain contaminated datapoints overlapping with the Gemma-2 pretraining data, the percent of contaminated datapoints for these tasks, and Figure S.13 shows the results of TxGemma-27B-Predict after filtering contaminated datapoints out. We observe that most tasks have no contamination, and filtering these datapoints out does not negatively impact TxGemma-27B-Predict performance. Figure S.16 plots performances for particular feature types across multiple model sizes, showing that the integration of SMILES strings and textual information is consistent. 
Figure S.17 plots performances over all tasks for comparisons of model size and domain fine-tuning, showing that these variables are significant. Figure S.18 shows that TxGemma-27B-Predict toxicity and clinical trial approval predictions are correlated, likely because toxicity in an important component of trial approval. Figure S.11 plots the inference speed, normalized by the number of chips used for serving, for all model sizes." + }, + { + "type": "title", + "bbox": [ + 0.111, + 0.421, + 0.744, + 0.438 + ], + "angle": 0, + "content": "D.2 Conversing with TxGemma-27B-Predict and TxGemma-27B-Chat" + }, + { + "type": "text", + "bbox": [ + 0.11, + 0.447, + 0.889, + 0.631 + ], + "angle": 0, + "content": "Figure S.8 illustrates an example of providing a prompt to TxGemma-27B-Predict that is not in the processed data format. TxGemma-27B-Predict is able to provide a coherent response in a manner similar to the general LLMs. Figure S.9 illustrates an example of first providing a prompt to TxGemma-27B-Predict in the processed format and asking follow-up questions in subsequent turns. In the second turn, instructing the model to not in the processed data format is able to elicit a reasonable but succinct response. However, the third turn leads to the model answering in the processed data format, highlighting the difficulty of multi-turn dialogue after training only on the processed TDC data. Figure S.7 plots the performance of TxGemma-27B-Chat on the MMLU benchmark in comparison with both Gemma-2-27B and TxGemma-27B-Predict. TxGemma-27B-Chat performs similarly to Gemma-2-27B on MMLU while TxGemma-27B-Predict scores much lower. Figure S.10 shows an example of using a specific prompting structure with TxGemma-27B-Chat to elicit reasoning on a more challenging task of clinical trial approval. If this prompting structure is not used, the model refuses to provide reasoning." 
+ }, + { + "type": "title", + "bbox": [ + 0.111, + 0.643, + 0.421, + 0.659 + ], + "angle": 0, + "content": "D.3 Agentic-Tx Tool Use Analysis" + }, + { + "type": "text", + "bbox": [ + 0.11, + 0.669, + 0.886, + 0.715 + ], + "angle": 0, + "content": "Figure S.14 shows the tool usage frequency for different benchmarks, illustrating that Agentic-Tx dynamically adjusts its tool usage to suit the problem. Figure S.15 shows the most frequent tools used per question for chemical preference questions, showing consistent usage of molecule-based tools." + }, + { + "type": "title", + "bbox": [ + 0.11, + 0.727, + 0.822, + 0.744 + ], + "angle": 0, + "content": "D.4 Proof-of-concept use of TxGemma for end-to-end therapeutic development" + }, + { + "type": "text", + "bbox": [ + 0.11, + 0.753, + 0.886, + 0.83 + ], + "angle": 0, + "content": "In Figure S.3, we illustrate a simplified example of how TxGemma might be helpful in identifying a drug for ovarian cancer. In this example, we chose to directly prompt TxGemma, rather than using Agentic-Tx, to strictly isolate potential information leakage introduced by web search, which is outside of our training data. This approach allows us to examine the model's inherent capabilities, though we acknowledge that a full agent-based workflow is a plausible extension." + }, + { + "type": "text", + "bbox": [ + 0.11, + 0.835, + 0.886, + 0.911 + ], + "angle": 0, + "content": "We initially use the DisGeNET prompt to identify an ovarian cancer-associated target gene from a short list of genes including PIK3CA, JAK2, RET. TxGemma-27B-Predict predicts that PIK3CA, a gene not found in the training set which is known to be mutated in ovarian cancer [2], has an association score of 0.7 with ovarian cancer. This association score is nearly 2.5 standard deviations above the mean score (\\(\\mu = 0.37\\), \\(\\sigma = 0.13\\)), indicating a strong association. 
JAK2 and RET share an association score of 0.3 which is below" + }, + { + "type": "page_number", + "bbox": [ + 0.863, + 0.938, + 0.885, + 0.95 + ], + "angle": 0, + "content": "39" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.11, + 0.09, + 0.888, + 0.243 + ], + "angle": 0, + "content": "the mean score. We then used TxGemma-27B-Predict to select a potential therapeutic from a molecule shortlist, prioritizing predicted \\(\\mathrm{IC}_{50}\\) against the E545K mutant (an oncogenic mutation [3]), toxicity, and clinical trial success. Our manually curated shortlist of drugs, unseen to the model during training, include two existing cancer therapies including alpelisib and afatinib and a novel molecule which we randomly generated. Both afatinib \\((1.02\\mu \\mathrm{M}\\mathrm{IC}_{50})\\) and the novel molecule \\((10.2\\mu \\mathrm{M}\\mathrm{IC}_{50})\\) exhibit high predicted \\(\\mathrm{IC}_{50}\\) values, suggesting weak inhibition. However, alpelisib has a predicted \\(\\mathrm{IC}_{50}\\) of \\(30~\\mathrm{nM}\\), suggestive of potent inhibition and relatively close to the experimental value of \\(5\\mathrm{nM}\\) suggested by Chen et al. [4] and Fritsch et al. [5]. TxGemma-27B-Predict also predicts that alpelisib is not mutagenic and would pass a phase 1 clinical trial for ovarian cancer. This iterative evaluation also corroborated by existing evidence: alpelisib is approved for breast cancer [6] and has shown activity in ovarian cancer [7, 8, 9]." + }, + { + "type": "text", + "bbox": [ + 0.11, + 0.248, + 0.885, + 0.37 + ], + "angle": 0, + "content": "This workflow demonstrates a proof-of-concept for TxGemma's application in automating and optimizing therapeutic selection. 
We anticipate an agentic system capable of generating comprehensive lists of potential therapies and gene-disease associations paired with TxGemma would enable rapid prioritization and filtering, helping in reducing the candidate pool and accelerating the transition to preclinical studies. However, it's crucial to acknowledge the limitations of this demonstration. Clinical trial predictions are limited to Phase 1 success, and mutagenicity predictions do not encompass all aspects of small molecule toxicity. Future work should include experimental validation of TxGemma predictions and consideration of additional toxicity factors, such as hematologic toxicity, which were not included in our data." + }, + { + "type": "title", + "bbox": [ + 0.129, + 0.4, + 0.316, + 0.425 + ], + "angle": 0, + "content": "TxGemma: Gene-Disease Association" + }, + { + "type": "text", + "bbox": [ + 0.129, + 0.433, + 0.32, + 0.469 + ], + "angle": 0, + "content": "Q: Predict association from O to 1 between the following gene and malignant neoplasm of ovary." + }, + { + "type": "text", + "bbox": [ + 0.13, + 0.473, + 0.245, + 0.482 + ], + "angle": 0, + "content": "PIK3CA:MPPRPSSGELW" + }, + { + "type": "text", + "bbox": [ + 0.13, + 0.49, + 0.298, + 0.518 + ], + "angle": 0, + "content": "A: PI3KCA has an association score of 0.7. 0.7 is 2.5a above the mean score" + }, + { + "type": "title", + "bbox": [ + 0.13, + 0.544, + 0.298, + 0.57 + ], + "angle": 0, + "content": "TxGemma: Drug-Target Interaction" + }, + { + "type": "text", + "bbox": [ + 0.13, + 0.577, + 0.34, + 0.603 + ], + "angle": 0, + "content": "Q: Given the following gene, predict how effectively the drug will inhibit it." + }, + { + "type": "text", + "bbox": [ + 0.13, + 0.61, + 0.278, + 0.628 + ], + "angle": 0, + "content": "PI3KCA E545K: MPPRSPSGELW... \nAlpelisib: C1-Cc(SC(-N)NC...)" + }, + { + "type": "text", + "bbox": [ + 0.131, + 0.637, + 0.261, + 0.653 + ], + "angle": 0, + "content": "A: Alpelisib has a IC5O of 30 nM." 
+ }, + { + "type": "image", + "bbox": [ + 0.352, + 0.419, + 0.671, + 0.6 + ], + "angle": 0, + "content": null + }, + { + "type": "title", + "bbox": [ + 0.679, + 0.401, + 0.832, + 0.427 + ], + "angle": 0, + "content": "TxGemma: Clinical Trial Approval" + }, + { + "type": "text", + "bbox": [ + 0.679, + 0.433, + 0.874, + 0.469 + ], + "angle": 0, + "content": "Q: Predict whether the following drug will pass a phase I clinical trial against malignant neoplasm of ovary." + }, + { + "type": "title", + "bbox": [ + 0.679, + 0.477, + 0.812, + 0.486 + ], + "angle": 0, + "content": "Alpelisib:CC1=C(SC(=N1)NC..." + }, + { + "type": "text", + "bbox": [ + 0.679, + 0.495, + 0.725, + 0.511 + ], + "angle": 0, + "content": "A: Approved." + }, + { + "type": "title", + "bbox": [ + 0.68, + 0.544, + 0.812, + 0.57 + ], + "angle": 0, + "content": "TxGemma: Toxicity Prediction" + }, + { + "type": "text", + "bbox": [ + 0.68, + 0.578, + 0.84, + 0.604 + ], + "angle": 0, + "content": "Q: Predict whether the following drug is mutagenic." + }, + { + "type": "title", + "bbox": [ + 0.681, + 0.611, + 0.815, + 0.62 + ], + "angle": 0, + "content": "Alpelisib:CC1=C(SC(=N1)NC..." + }, + { + "type": "text", + "bbox": [ + 0.681, + 0.628, + 0.748, + 0.646 + ], + "angle": 0, + "content": "A: Not mutagenic." + }, + { + "type": "image_caption", + "bbox": [ + 0.11, + 0.709, + 0.888, + 0.83 + ], + "angle": 0, + "content": "Figure S.3 | Proof-of-concept example of applying TxGemma to end-to-end therapeutic development. TxGemma is used to suggest a therapeutic for ovarian cancer by first identifying PIK3CA as an associated gene target from a list of possible genes. Then, from a list of candidate therapeutics, TxGemma predicts that alpelisib (a molecule previously unseen to TxGemma that has shown activity against ovarian cancer and is approved for breast cancer) would bind the E545K mutant of PIK3CA, that it would not be toxic/mutagenic, and that it would be approved in a clinical trial. 
Note that this example serves as a proof-of-concept demonstration and does not account for all aspects of efficacy, toxicity, or trial approval. Rigorous experimental validation of TxGemma predictions to completely new therapeutics is also a critical step to evaluating TxGemma and remains an area of future work." + }, + { + "type": "page_number", + "bbox": [ + 0.863, + 0.938, + 0.885, + 0.949 + ], + "angle": 0, + "content": "40" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.193, + 0.089, + 0.778, + 0.311 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.194, + 0.322, + 0.777, + 0.567 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.194, + 0.575, + 0.777, + 0.784 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.312, + 0.793, + 0.404, + 0.803 + ], + "angle": 0, + "content": "Multi-instance tasks" + }, + { + "type": "image_caption", + "bbox": [ + 0.559, + 0.793, + 0.726, + 0.803 + ], + "angle": 0, + "content": "Single-instance and generative tasks" + }, + { + "type": "image_caption", + "bbox": [ + 0.11, + 0.822, + 0.887, + 0.913 + ], + "angle": 0, + "content": "Figure S.4 | Performance of TxGemma-27B-Predict compared to generalist and specialist SOTA models (top) The median relative change in performance of TxGemma-27B-Predict compared to Tx-LLM M. (middle) The median relative change in performance of TxGemma-27B-Predict compared to specialist SOTA models. (bottom) The median relative change in performance of TxGemma-27B-Predict compared to all SOTA models, including both Tx-LLM M and specialist models. Multi-instance tasks indicate tasks that involve multiple features, whereas single-instance tasks only involve one feature. The tasks within each task type are defined in Tables S.2 and S.3." 
+ }, + { + "type": "page_number", + "bbox": [ + 0.863, + 0.938, + 0.884, + 0.949 + ], + "angle": 0, + "content": "41" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.309, + 0.167, + 0.446, + 0.318 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.453, + 0.168, + 0.681, + 0.314 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.11, + 0.339, + 0.889, + 0.385 + ], + "angle": 0, + "content": "Figure S.5 | TxGemma performs comparably to LlaSMol on small molecule tasks. Accuracy is reported for binary classification tasks, and RMSE is reported for regression tasks. BBBP corresponds to BBB Martins in TDC tasks, ESOL corresponds to Solubility AqSolDB, and Lipo corresponds to Lipophilicity AstraZeneca." + }, + { + "type": "image", + "bbox": [ + 0.119, + 0.553, + 0.315, + 0.747 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.306, + 0.553, + 0.498, + 0.729 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.498, + 0.553, + 0.688, + 0.747 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.685, + 0.553, + 0.88, + 0.73 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.11, + 0.782, + 0.889, + 0.829 + ], + "angle": 0, + "content": "Figure S.6 | TxGemma performs comparably to MolE on small molecule tasks. Comparison of MolE with TxGemma-27B-Predict on TDC tasks, separated by metric type (MAE, AUROC, Spearman correlation, and AUPRC). TxGemma-27B-Predict performs better than MolE on 10 out of 22 tasks." + }, + { + "type": "page_number", + "bbox": [ + 0.863, + 0.938, + 0.885, + 0.949 + ], + "angle": 0, + "content": "42" + } + ], + [ + { + "type": "table_caption", + "bbox": [ + 0.11, + 0.203, + 0.885, + 0.233 + ], + "angle": 0, + "content": "Table S.13 | Model performance on binary classification tasks. 
TxGemma-Predict and Gemma-2 performances compared with specialist SOTA for each binary classification task, along with the metric type." + }, + { + "type": "table", + "bbox": [ + 0.062, + 0.237, + 0.937, + 0.807 + ], + "angle": 0, + "content": "
Task NameMetricSpecialist SOTAGemma-2-2BGemma-2-9BGemma-2-27BTxGemma-2B-PredictTxGemma-9B-PredictTxGemma-27B-Predict
AMESAUROC0.871 [10]0.4870.6050.5080.7960.7980.816
BBB MartinsAUROC0.915 [11]0.2500.6450.5460.8640.8740.907
Bioavailability MaAUROC0.748 [12]0.4790.5840.5790.7150.6550.696
CYP1A2 VeithAUPRC0.900 [13]0.3880.5330.5620.9100.9160.922
CYP2C19 VeithAUROC0.890 [13]0.4560.5950.6190.9050.9060.899
CYP2C9 Substrate CarbonMangelsAUPRC0.441 [10]0.2930.3360.3670.4570.4680.427
CYP2C9 VeithAUPRC0.839 [14]0.2830.3740.4170.8010.7990.798
CYP2D6 Substrate CarbonMangelsAUPRC0.736 [14]0.2330.3290.3860.6050.6030.706
CYP2D6 VeithAUPRC0.739 [14]0.1450.1660.1850.6370.6640.681
CYP3A4 Substrate CarbonMangelsAUROC0.662 [15]0.5140.5850.5960.6690.6220.690
CYP3A4 VeithAUPRC0.904 [14]0.4270.5310.5350.8440.8390.854
Carcinogens LaguninAccuracy0.770 [16]0.2500.2860.3390.8210.8390.857
ClinToxAUROC0.948 [17]0.4370.4820.4240.8100.8310.888
DILIAUROC0.925 [10]0.3200.6510.6270.8750.8480.887
HIA HouAUROC0.988 [18]0.2570.9320.7830.9370.9670.988
HIVAUROC0.851 [19]0.4910.4950.5370.7370.7340.764
HuRIAUPRC0.724 [20]0.4960.4840.5260.7510.7790.799
MHC1 IEDB IMGT NielsenAUROC0.986 [21]0.4980.5040.5170.9100.9270.929
MHC2 IEDB JensenAUROC0.940 [22]0.4980.5260.5440.8120.8500.851
PAMPA NCATSAUROC0.900 [23]0.4650.5830.5440.6420.6710.705
Pgp BroccatelliAUROC0.935 [10]0.4160.6700.4970.9000.9110.936
SARSCOV2 3CLPro DiamondAUROC0.800 [24]0.3010.3880.4770.7330.7080.769
SARSCoV2 Vitro TouretAUROC0.640 [25]0.5680.6110.4790.6500.6680.598
SAbDab ChenAUPRC0.510 [26]0.5320.6960.7010.6760.8070.767
Skin ReactionAUROC0.840 [27]0.4290.5460.4930.6710.6480.708
Tox21AUROC0.961 [28]0.3580.4360.4970.8810.8960.893
ToxCastAUROC0.777 [17]0.4850.5120.5580.7840.7670.800
butkiewiczAUROC0.840 [29]0.4570.4910.4910.7910.7720.831
hERGAUROC0.874 [12]0.5380.6390.5000.8760.8810.884
hERG KarimAccuracy0.770 [30]0.5290.5320.5220.7780.7940.774
herg centralAUROC0.860 [31]0.4810.5110.5170.8800.8610.896
miRTarBaseAccuracy0.804 [32]0.4980.5010.4980.8050.8290.801
phase1AUROC0.576 [33]0.5620.5620.5530.6420.6350.622
phase2AUROC0.645 [33]0.5430.5710.5310.6650.6680.676
phase3AUROC0.723 [33]0.5590.5670.5590.7310.7290.739
weberAUROC0.870 [34]0.4660.5860.4690.7300.7270.749
" + }, + { + "type": "page_number", + "bbox": [ + 0.863, + 0.938, + 0.884, + 0.949 + ], + "angle": 0, + "content": "43" + } + ], + [ + { + "type": "table_caption", + "bbox": [ + 0.11, + 0.239, + 0.887, + 0.283 + ], + "angle": 0, + "content": "Table S.14 | Model performance on regression and generation tasks. TxGemma-Predict and Gemma-2 performances compared with specialist SOTA for each regression and generation task, along with the metric type. Tasks for which we did not find a specialist SOTA value are indicated with N/A." + }, + { + "type": "table", + "bbox": [ + 0.093, + 0.291, + 0.907, + 0.769 + ], + "angle": 0, + "content": "
Task NameMetricSpecialist SOTAGemma-2-2BGemma-2-9BGemma-2-27BTxGemma-2B-PredictTxGemma-9B-PredictTxGemma-27B-Predict
BindingDB PatentPCC0.588 [35]-0.066-0.0390.0300.4220.5240.538
BindingDB ic50Spearman0.637 [36]0.0010.0020.0440.3990.3980.445
BindingDB kdPCC0.712 [37]0.197-0.0090.1190.3520.3700.456
BindingDB kiPCC0.840 [38]-0.018-0.053-0.0270.6610.7370.676
Buchwald HartwigPCC0.786 [39]0.5280.6360.6840.8610.9150.910
Caco2 WangMAE0.285 [18]1.0570.5330.6180.4760.3730.401
Clearance Hepatocyte AZSpearman0.440 [40]0.1410.1630.2140.3530.3380.259
Clearance Microsome AZSpearman0.625 [18]0.2390.3250.2940.4680.6230.462
DAVISMSE0.219 [41]2.7059.0544.4730.6010.5870.555
DisGeNETMAEN/A0.2940.2950.2770.0570.0540.054
DrugComb BlissMAE4.560 [42]8.2137.4136.4564.2304.3374.156
DrugComb CSSMAE16.858 [42]36.84733.83722.61415.75216.48015.000
DrugComb HSAMAE4.453 [42]7.4587.3656.6704.2314.3354.209
DrugComb LoeweMAE9.184 [42]13.87313.36914.73117.34218.66517.336
DrugComb ZIPMAE4.027 [42]8.5886.2265.4043.9503.9043.807
GDSC1PCC0.860 [43]-0.0410.0730.0930.8760.5450.892
GDSC2PCC0.860 [43]-0.043-0.0370.0860.8240.5390.912
Half Life ObachSpearman0.547 [44]0.2880.2840.4850.3860.4940.458
KIBAMSE0.154 [41]2.8871.9252.0160.5880.5480.633
LD50 ZhuMAE0.552 [18]1.9710.8960.8740.7100.6300.628
LeenaySpearman0.740 [45]0.0850.0910.1460.0970.0670.276
Lipophilicity AstraZenecaMAE0.467 [46]1.5061.2071.0320.6100.5650.539
OncoPolyPharmacologyPCC0.730 [47]-0.0400.0640.0720.4730.5180.540
PPBR AZMAE7.788 [46]10.8369.7689.8799.2668.8899.029
Protein SAbDabMAEN/A1.2801.1701.1631.0661.1061.210
Solubility AqSolDBMAE0.761 [46]4.2142.5493.0960.9610.8680.821
TAPMAEN/A5.0084.2413.9585.3014.4734.280
USPTOAccuracy0.415 [48]0.0000.0010.0000.2870.0970.084
USPTO YieldsPCC0.361 [39]-0.0150.0260.0640.0110.0310.395
VDss LombardoSpearman0.627 [49]0.1000.4130.3540.5640.6070.560
" + }, + { + "type": "page_number", + "bbox": [ + 0.863, + 0.938, + 0.885, + 0.95 + ], + "angle": 0, + "content": "44" + } + ], + [ + { + "type": "table_caption", + "bbox": [ + 0.11, + 0.197, + 0.885, + 0.226 + ], + "angle": 0, + "content": "Table S.15 | Model performance on binary classification tasks. TxGemma-Predict, TxGemma-Chat, and Tx-LLM performances for each binary classification task, along with the metric type." + }, + { + "type": "table", + "bbox": [ + 0.073, + 0.234, + 0.928, + 0.796 + ], + "angle": 0, + "content": "
Task NameMetricTxGemma-9B-PredictTxGemma-27B-PredictTxGemma-9B-ChatTxGemma-27B-ChatTx-LLM STx-LLM M
AMESAUROC0.7980.8160.7210.7330.7850.786
BBB MartinsAUROC0.8740.9070.8110.8610.8050.882
Bioavailability MaAUROC0.6550.6960.6200.6590.6050.702
CYP1A2 VeithAUPRC0.9160.9220.8390.8230.9060.914
CYP2C19 VeithAUROC0.9060.8990.8370.8280.8770.895
CYP2C9 Substrate CarbonMangelsAUPRC0.4680.4270.3820.4270.4030.436
CYP2C9 VeithAUPRC0.7990.7980.6670.6820.7500.788
CYP2D6 Substrate CarbonMangelsAUPRC0.6030.7060.5490.7000.6430.600
CYP2D6 VeithAUPRC0.6640.6810.5040.4350.6050.659
CYP3A4 Substrate CarbonMangelsAUROC0.6220.6900.6420.6660.6370.647
CYP3A4 VeithAUPRC0.8390.8540.7490.7500.8000.840
Carcinogens LaguninAccuracy0.8390.8570.8930.9110.8570.786
ClinToxAUROC0.8310.8880.7110.6370.8180.863
DILIAUROC0.8480.8870.6880.7660.7270.882
HIA HouAUROC0.9670.9880.8720.8970.9420.990
HIV*AUROC0.7340.7640.6120.5820.6860.732
HuRIAUPRC0.7790.7990.6280.6210.7050.753
MHC1 IEDB IMGT NielsenAUROC0.9270.9290.8750.8250.9130.907
MHC2 IEDB JensenAUROC0.8500.8510.7240.6830.7810.863
PAMPA NCATSAUROC0.6710.7050.7350.6640.6460.668
Pgp BroccatelliAUROC0.9110.9360.8990.9120.9090.939
SARSCOV2 3CLPro DiamondAUROC0.7080.7690.6990.7220.7550.712
SARSCoV2 Vitro TouretAUROC0.6680.5980.5030.5060.5120.601
SAbDab ChenAUPRC0.8070.7670.7020.7190.3900.473
Skin ReactionAUROC0.6480.7080.6380.5430.5640.615
Tox21AUROC0.8960.8930.8070.7970.8580.882
ToxCastAUROC0.7670.8000.7540.7340.7790.792
butkiewiczAUROC0.7720.8310.6290.6190.5740.566
hERGAUROC0.8810.8840.8300.8320.8790.909
hERG KarimAccuracy0.7940.7740.6570.6680.7240.745
herg centralAUROC0.8610.8960.8300.8070.8800.888
miRTarBaseAccuracy0.8290.8010.6790.6440.7650.799
phase1AUROC0.6350.6220.5760.5570.6240.667
phase2AUROC0.6680.6760.6380.6260.6390.676
phase3AUROC0.7290.7390.6830.6680.7010.728
weberAUROC0.7270.7490.6720.6430.7380.743
" + }, + { + "type": "table_footnote", + "bbox": [ + 0.112, + 0.8, + 0.456, + 0.812 + ], + "angle": 0, + "content": "* To predict whether compounds have Anti-HIV properties." + }, + { + "type": "page_number", + "bbox": [ + 0.863, + 0.938, + 0.884, + 0.949 + ], + "angle": 0, + "content": "45" + } + ], + [ + { + "type": "table_caption", + "bbox": [ + 0.11, + 0.247, + 0.885, + 0.276 + ], + "angle": 0, + "content": "Table S.16 | Model performance on regression and generation tasks. TxGemma-Predict, TxGemma-Chat, and Tx-LLM performances for each regression and generation task, along with the metric type." + }, + { + "type": "table", + "bbox": [ + 0.104, + 0.284, + 0.896, + 0.761 + ], + "angle": 0, + "content": "
Task NameMetricTxGemma-9B-PredictTxGemma-27B-PredictTxGemma-9B-ChatTxGemma-27B-ChatTx-LLM STx-LLM M
BindingDB PatentPCC0.5240.5380.4520.2200.4740.531
BindingDB ic50Spearman0.3980.4450.4120.3620.3260.311
BindingDB kdPCC0.3700.4560.1620.1590.3170.391
BindingDB kiPCC0.7370.6760.4480.2110.5650.726
Buchwald HartwigPCC0.9150.9100.2550.7570.6820.905
Caco2 WangMAE0.3730.4010.6430.3980.6210.432
Clearance Hepatocyte AZSpearman0.3380.2590.1970.1500.2560.385
Clearance Microsome AZSpearman0.6230.4620.3450.4200.3850.413
DAVISMSE0.5870.5550.6080.5610.5640.704
DisGeNETMAE0.0540.0540.0660.0640.0590.057
DrugComb BlissMAE4.3374.1564.5024.5114.4254.104
DrugComb CSSMAE16.48015.00016.38416.90014.74014.057
DrugComb HSAMAE4.3354.2094.4974.5204.3114.118
DrugComb LoeweMAE18.66517.33616.99416.91417.42817.381
DrugComb ZIPMAE3.9043.8074.1394.1414.0473.777
GDSC1PCC0.5450.8920.8610.8020.8760.887
GDSC2PCC0.5390.9120.8640.8230.8960.900
Half Life ObachSpearman0.4940.4580.3300.4140.5250.448
KIBAMSE0.5480.6330.7050.8520.7090.548
LD50 ZhuMAE0.6300.6280.7400.7050.8080.618
LeenaySpearman0.0670.2760.1280.0950.0480.083
Lipophilicity AstraZenecaMAE0.5650.5390.9850.8420.7790.587
OncoPolyPharmacologyPCC0.5180.5400.3590.1930.4180.552
PPBR AZMAE8.8899.02911.36710.89511.1389.108
Protein SAbDabMAE1.1061.2101.2681.1161.4321.268
Solubility AqSolDBMAE0.8680.8211.1591.1330.9310.987
TAPMAE4.4734.2804.8594.0835.0754.983
USPTOAccuracy0.0970.0840.0860.0910.2200.239
USPTO YieldsPCC0.0310.3950.0030.0260.0420.070
VDss LombardoSpearman0.6070.5600.3960.4070.4970.609
" + }, + { + "type": "page_number", + "bbox": [ + 0.864, + 0.938, + 0.884, + 0.949 + ], + "angle": 0, + "content": "46" + } + ], + [ + { + "type": "table_caption", + "bbox": [ + 0.11, + 0.211, + 0.885, + 0.256 + ], + "angle": 0, + "content": "Table S.17 | Model performance on binary classification tasks for models trained only on datasets with commercial licenses. TxGemma-Predict and TxGemma-Chat performances for each binary classification task, along with the metric type." + }, + { + "type": "table", + "bbox": [ + 0.113, + 0.262, + 0.885, + 0.782 + ], + "angle": 0, + "content": "
Task NameMetricTxGemma-2B-PredictTxGemma-9B-PredictTxGemma-27B-PredictTxGemma-9B-ChatTxGemma-27B-Chat
AMESAUROC0.8120.8030.8260.7230.729
BBB MartinsAUROC0.8830.8490.8990.8320.848
Bioavailability MaAUROC0.6880.6880.7240.6660.625
CYP1A2 VeithAUPRC0.9110.9140.9160.8620.817
CYP2C19 VeithAUROC0.9050.8970.8970.8440.823
CYP2C9 Substrate CarbonMangelsAUPRC0.4170.3900.4600.4140.375
CYP2C9 VeithAUPRC0.7870.8000.7930.7000.685
CYP2D6 Substrate CarbonMangelsAUPRC0.6260.6970.7060.6530.704
CYP2D6 VeithAUPRC0.6660.6620.6770.5170.422
CYP3A4 Substrate CarbonMangelsAUROC0.6380.6800.6920.6440.653
CYP3A4 VeithAUPRC0.8420.8390.8520.7600.747
Carcinogens LaguninAccuracy0.9110.8570.8750.8930.929
ClinToxAUROC0.9170.8150.8840.7160.595
DILIAUROC0.8290.8230.9270.6750.797
HIA HouAUROC0.9840.9540.9900.9060.927
HIVAUROC0.7810.7300.7680.6410.589
HuRIAUPRC0.7350.7670.7970.6850.620
MHC1 IEDB IMGT NielsenAUROC0.9300.9290.9330.8870.826
MHC2 IEDB JensenAUROC0.8550.8520.8550.7330.682
PAMPA NCATSAUROC0.6940.6300.7240.6840.659
Pgp BroccatelliAUROC0.9220.9320.9410.8730.920
SARSCOV2 3CLPro DiamondAUROC0.7480.7990.6760.7160.712
SARSCoV2 Vitro TouretAUROC0.6590.6220.5970.5270.516
SAbDab ChenAUPRC0.7260.7450.7930.5230.731
Skin ReactionAUROC0.6910.6240.7330.6210.571
Tox21AUROC0.8970.8930.8900.8180.797
ToxCastAUROC0.7870.7660.7970.7540.735
butkiewiczAUROC0.8110.7750.8260.6810.606
hERGAUROC0.9020.8900.8940.8550.829
hERG KarimAccuracy0.7780.7960.7720.6490.673
herg centralAUROC0.8900.8600.8920.8420.805
miRTarBaseAccuracy0.8180.8340.8020.6720.649
weberAUROC0.7500.6970.7490.6920.645
" + }, + { + "type": "table_footnote", + "bbox": [ + 0.112, + 0.786, + 0.458, + 0.798 + ], + "angle": 0, + "content": "* To predict whether compounds have Anti-HIV properties." + }, + { + "type": "page_number", + "bbox": [ + 0.863, + 0.938, + 0.885, + 0.949 + ], + "angle": 0, + "content": "47" + } + ], + [ + { + "type": "table_caption", + "bbox": [ + 0.11, + 0.261, + 0.885, + 0.306 + ], + "angle": 0, + "content": "Table S.18 | Model performance on regression and generation tasks for models trained only on datasets with commercial licenses. TxGemma-Predict and TxGemma-Chat performances for each regression or generation task, along with the metric type." + }, + { + "type": "table", + "bbox": [ + 0.142, + 0.312, + 0.859, + 0.747 + ], + "angle": 0, + "content": "
Task NameMetricTxGemma-2B-PredictTxGemma-9B-PredictTxGemma-27B-PredictTxGemma-9B-ChatTxGemma-27B-Chat
BindingDB PatentPCC0.5560.3760.5370.4380.118
BindingDB ic50Spearman0.4250.3130.4650.4430.361
BindingDB kdPCC0.4900.3930.2890.2070.156
BindingDB kiPCC0.7280.7120.6700.3870.218
Buchwald HartwigPCC0.9200.9180.9030.5740.818
Caco2 WangMAE0.6190.4910.4790.5880.383
Clearance Hepatocyte AZSpearman0.2920.3780.3500.1660.190
Clearance Microsome AZSpearman0.5210.5240.5100.3940.395
DAVISMSE0.5760.5640.5750.5610.561
DrugComb BlissMAE4.0884.2864.1574.4544.519
DrugComb CSSMAE14.56815.37014.92515.96016.649
DrugComb HSAMAE4.0634.2824.1784.4864.529
DrugComb LoeweMAE17.31317.86217.32717.19016.873
DrugComb ZIPMAE3.7373.8483.8234.0934.132
Half Life ObachSpearman0.4230.3480.4910.2690.393
KIBAMSE0.5620.5250.5540.8300.858
LD50 ZhuMAE0.6980.7180.6770.7240.721
LeenaySpearman0.1140.0890.2590.0780.183
Lipophilicity AstraZenecaMAE0.5710.6670.6130.8340.837
OncoPolyPharmacologyPCC0.5560.4370.5310.3880.148
PPBR AZMAE8.8139.1778.79211.00411.025
Protein SAbDabMAE1.1171.0221.0721.3481.173
Solubility AqSolDBMAE0.9111.1850.8021.1601.135
TAPMAE5.4984.8394.0884.6114.444
USPTOAccuracy0.3160.0410.2810.1450.090
USPTO YieldsPCC0.4710.0020.3500.1140.002
VDss LombardoSpearman0.5940.5380.5910.4100.487
" + }, + { + "type": "page_number", + "bbox": [ + 0.863, + 0.938, + 0.884, + 0.949 + ], + "angle": 0, + "content": "48" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.202, + 0.092, + 0.792, + 0.786 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.11, + 0.798, + 0.889, + 0.92 + ], + "angle": 0, + "content": "Figure S.7 | Performance of TxGemma and Gemma-2 models on MMLU. Score of TxGemma-27B-Chat, TxGemma-27B-Predict, and Gemma-2-27B on each subject of the MMLU benchmark [50], a comprehensive suite of 57 diverse tasks spanning mathematics, history, computer science, law, etc. This benchmark evaluates knowledge, reasoning, and problem-solving abilities across a wide range of academic subjects, providing a robust measure of overall language understanding. TxGemma-27B-Chat (73.87%) performs slightly below Gemma-2-27B (75.38%) but significantly better than TxGemma-27B-Predict (53.60%), suggesting that domain fine-tuning purely on therapeutic data can cause loss of information learned during base model training, but simultaneously training with general instruction-tuning data can compensate for this." + }, + { + "type": "page_number", + "bbox": [ + 0.863, + 0.938, + 0.885, + 0.949 + ], + "angle": 0, + "content": "49" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.163, + 0.294, + 0.349, + 0.304 + ], + "angle": 0, + "content": "Tell me about the blood-brain barrier" + }, + { + "type": "title", + "bbox": [ + 0.28, + 0.333, + 0.338, + 0.344 + ], + "angle": 0, + "content": "TxGemma" + }, + { + "type": "text", + "bbox": [ + 0.28, + 0.356, + 0.826, + 0.397 + ], + "angle": 0, + "content": "The blood-brain barrier (BBB) is a highly selective semipermeable border that separates the circulating blood from the brain and extracellular fluid in the central nervous system (CNS). 
It plays a crucial role in maintaining a stable environment for the brain, protecting it from harmful substances and pathogens while allowing the passage of essential nutrients." + }, + { + "type": "text", + "bbox": [ + 0.281, + 0.4, + 0.473, + 0.41 + ], + "angle": 0, + "content": "Here are some key features of the BBB:" + }, + { + "type": "text", + "bbox": [ + 0.28, + 0.413, + 0.826, + 0.443 + ], + "angle": 0, + "content": "Cellular composition: The BBB is primarily formed by endothelial cells lining the capillaries in the brain. These cells are tightly connected by specialized tight junctions that restrict the movement of molecules between them." + }, + { + "type": "text", + "bbox": [ + 0.28, + 0.447, + 0.825, + 0.468 + ], + "angle": 0, + "content": "Astrocytes and pericytes: These cells provide structural and functional support to the endothelial cells, contributing to the regulation of BBB permeability." + }, + { + "type": "text", + "bbox": [ + 0.28, + 0.471, + 0.825, + 0.491 + ], + "angle": 0, + "content": "Selective transport mechanisms: The BBB utilizes various transport systems to regulate the passage of molecules across its barrier. These include:" + }, + { + "type": "text", + "bbox": [ + 0.281, + 0.494, + 0.63, + 0.505 + ], + "angle": 0, + "content": "Passive diffusion: Small lipophilic molecules can readily cross the BBB." + }, + { + "type": "text", + "bbox": [ + 0.28, + 0.508, + 0.825, + 0.528 + ], + "angle": 0, + "content": "Facilitated diffusion: Carrier-mediated transport systems assist the movement of specific molecules, such as glucose and amino acids." + }, + { + "type": "text", + "bbox": [ + 0.28, + 0.531, + 0.826, + 0.552 + ], + "angle": 0, + "content": "Active transport: Energy-dependent mechanisms pump molecules against their concentration gradient, such as the removal of toxins from the brain." 
+ }, + { + "type": "text", + "bbox": [ + 0.28, + 0.555, + 0.826, + 0.576 + ], + "angle": 0, + "content": "P-glycoprotein (P-gp): This efflux pump actively removes a wide range of drugs from the brain, including many central nervous system-acting medications." + }, + { + "type": "text", + "bbox": [ + 0.28, + 0.579, + 0.826, + 0.608 + ], + "angle": 0, + "content": "*Impairment of the BBB can lead to various neurological disorders, including stroke, multiple sclerosis, and Alzheimer's disease. It can also allow the entry of harmful substances into the brain, such as pathogens and toxins." + }, + { + "type": "text", + "bbox": [ + 0.28, + 0.612, + 0.825, + 0.633 + ], + "angle": 0, + "content": "*Researchers are actively developing strategies to overcome the BBB for drug delivery to the brain, including using nanocarriers and focused ultrasound." + }, + { + "type": "image_caption", + "bbox": [ + 0.11, + 0.664, + 0.888, + 0.708 + ], + "angle": 0, + "content": "Figure S.8 | Models can converse about general topics after fine-tuning only on the processed TDC data. Example of a dialogue with TxGemma-27B-Predict. When asked a question that is not in a processed TDC data format, the model is able to respond coherently." + }, + { + "type": "page_number", + "bbox": [ + 0.864, + 0.938, + 0.885, + 0.949 + ], + "angle": 0, + "content": "50" + } + ], + [ + { + "type": "title", + "bbox": [ + 0.364, + 0.271, + 0.629, + 0.282 + ], + "angle": 0, + "content": "Example of TxGemma-27B-Predict Failure in Conversation" + }, + { + "type": "title", + "bbox": [ + 0.227, + 0.301, + 0.265, + 0.31 + ], + "angle": 0, + "content": "Prompt" + }, + { + "type": "text", + "bbox": [ + 0.227, + 0.313, + 0.522, + 0.323 + ], + "angle": 0, + "content": "Instructions: Answer the following question about drug properties." 
+ }, + { + "type": "text", + "bbox": [ + 0.227, + 0.326, + 0.75, + 0.353 + ], + "angle": 0, + "content": "Context: As a membrane separating circulating blood and brain extracellular fluid, the blood-brain barrier (BBB) is the protection layer that blocks most foreign drugs. Thus the ability of a drug to penetrate the barrier to deliver to the site of action forms a crucial challenge in development of drugs for central nervous system." + }, + { + "type": "text", + "bbox": [ + 0.228, + 0.356, + 0.476, + 0.365 + ], + "angle": 0, + "content": "Question: Given a drug SMILES string, predict whether it" + }, + { + "type": "text", + "bbox": [ + 0.228, + 0.368, + 0.432, + 0.377 + ], + "angle": 0, + "content": "(A) does not cross the BBB (B) crosses the BBB" + }, + { + "type": "text", + "bbox": [ + 0.228, + 0.379, + 0.619, + 0.388 + ], + "angle": 0, + "content": "Drug SMILES: C1CN(CCC1(C=O)NC(CCO)C2=CC=C(C=C2)Cl)N)C3=NC=NC4=C3C=CN4" + }, + { + "type": "text", + "bbox": [ + 0.228, + 0.391, + 0.268, + 0.4 + ], + "angle": 0, + "content": "Answer:" + }, + { + "type": "title", + "bbox": [ + 0.228, + 0.418, + 0.367, + 0.427 + ], + "angle": 0, + "content": "Tx-Gemma-Predict Response" + }, + { + "type": "text", + "bbox": [ + 0.228, + 0.433, + 0.282, + 0.441 + ], + "angle": 0, + "content": "Answer: (B)" + }, + { + "type": "title", + "bbox": [ + 0.228, + 0.459, + 0.265, + 0.468 + ], + "angle": 0, + "content": "Prompt" + }, + { + "type": "text", + "bbox": [ + 0.228, + 0.473, + 0.626, + 0.482 + ], + "angle": 0, + "content": "What properties are useful for crossing the blood-brain barrier? 
Don't answer with (A) or (B)" + }, + { + "type": "title", + "bbox": [ + 0.228, + 0.499, + 0.367, + 0.508 + ], + "angle": 0, + "content": "Tx-Gemma-Predict Response" + }, + { + "type": "text", + "bbox": [ + 0.228, + 0.512, + 0.748, + 0.532 + ], + "angle": 0, + "content": "Answer: Small molecular size, high lipophilicity, lack of ionization, and absence of extensive protein binding are generally favorable for BBB penetration." + }, + { + "type": "title", + "bbox": [ + 0.228, + 0.549, + 0.265, + 0.557 + ], + "angle": 0, + "content": "Prompt" + }, + { + "type": "text", + "bbox": [ + 0.228, + 0.561, + 0.56, + 0.57 + ], + "angle": 0, + "content": "Does the drug I gave you have these properties? Don't answer with (A) or (B)" + }, + { + "type": "title", + "bbox": [ + 0.228, + 0.587, + 0.367, + 0.597 + ], + "angle": 0, + "content": "Tx-Gemma-Predict Response" + }, + { + "type": "text", + "bbox": [ + 0.228, + 0.601, + 0.282, + 0.61 + ], + "angle": 0, + "content": "Answer: (B)" + }, + { + "type": "image_caption", + "bbox": [ + 0.11, + 0.638, + 0.887, + 0.729 + ], + "angle": 0, + "content": "Figure S.9 | Predictive model struggles to hold a conversation about processed TDC data prompts after fine-tuning only on this data. Example of a multi-turn dialogue with TxGemma-27B-Predict. The initial turn is a question in the processed TDC data format. After the initial turn, the model is prompted to describe some properties associated with the problem of crossing the blood-brain barrier. An additional instruction to avoid answering with (A) or (B) is necessary. The model is able to provide some answers in natural language, but another turn leads to an inappropriate multiple choice answer." 
+ }, + { + "type": "page_number", + "bbox": [ + 0.864, + 0.938, + 0.883, + 0.949 + ], + "angle": 0, + "content": "51" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.148, + 0.098, + 0.721, + 0.141 + ], + "angle": 0, + "content": "Provide five steps of reasoning to think through the problem and then provide an answer. Let's think step by step. Consider whether the drug would pass phase 1 trials, not whether the drug would be fully approved. You must also discuss the specific drug and disease as part of your reasoning that leads to your answer. This should be emphasized." + }, + { + "type": "text", + "bbox": [ + 0.148, + 0.146, + 0.482, + 0.158 + ], + "angle": 0, + "content": "Instructions: Answer the following question about clinical trials." + }, + { + "type": "text", + "bbox": [ + 0.147, + 0.163, + 0.722, + 0.228 + ], + "angle": 0, + "content": "Context: Clinical trial is the most time and cost-consuming step in the drug discovery process. Phase 1 clinical trials test the safety and basic properties of a new drug or treatment in a small group of people for the first time. Optimizing and designing trials with machine learning could drastically lead to the speedup of delivery of life-saving therapeutics to patients. Clinical trial outcome prediction is a machine learning task that aims to forecast the outcome of clinical trials, such as the approval rate of a drug or treatment. It utilizes various clinical trial features, including the drug's molecular structure and patient disease." 
+ }, + { + "type": "text", + "bbox": [ + 0.148, + 0.232, + 0.548, + 0.254 + ], + "angle": 0, + "content": "Question: Given a drug SMILES string and disease, predict if the phase 1 trial (A) would not be approved (B) would be approved" + }, + { + "type": "text", + "bbox": [ + 0.148, + 0.261, + 0.605, + 0.273 + ], + "angle": 0, + "content": "Drug SMILES: CC1=C(SC(=N1)NC(=O)N2CCCCC2(=O)N)C3=CC(=NC=C3)C(C)(C)(C(F)(F)F" + }, + { + "type": "text", + "bbox": [ + 0.147, + 0.278, + 0.721, + 0.311 + ], + "angle": 0, + "content": "Disease: Malignant neoplasm of ovary: Ovarian cancer is a disease that affects women. In this form of cancer, certain cells in the ovary become abnormal and multiply uncontrollably to form a tumor. The ovaries are the female reproductive organs in which egg cells are produced. In about 90 percent of cases, ..." + }, + { + "type": "text", + "bbox": [ + 0.148, + 0.316, + 0.478, + 0.328 + ], + "angle": 0, + "content": "Remember: (A) would not be approved (B) would be approved" + }, + { + "type": "text", + "bbox": [ + 0.149, + 0.337, + 0.214, + 0.349 + ], + "angle": 0, + "content": "Reasoning:" + }, + { + "type": "title", + "bbox": [ + 0.266, + 0.373, + 0.331, + 0.386 + ], + "angle": 0, + "content": "TxGemma" + }, + { + "type": "text", + "bbox": [ + 0.269, + 0.398, + 0.707, + 0.41 + ], + "angle": 0, + "content": "Here's a step-by-step reasoning process to predict the outcome of the Phase 1 trial:" + }, + { + "type": "title", + "bbox": [ + 0.269, + 0.428, + 0.497, + 0.439 + ], + "angle": 0, + "content": "1. Analyze the Drug's Chemical Structure:" + }, + { + "type": "text", + "bbox": [ + 0.269, + 0.442, + 0.843, + 0.464 + ], + "angle": 0, + "content": "Observation: The SMILES string indicates a complex molecule with multiple aromatic rings, fluorine atoms, and amide groups." 
+ }, + { + "type": "text", + "bbox": [ + 0.269, + 0.467, + 0.842, + 0.488 + ], + "angle": 0, + "content": "Reasoning: Such structures often point to potential biological activity, but also raise concerns about potential toxicity and side effects." + }, + { + "type": "title", + "bbox": [ + 0.269, + 0.492, + 0.405, + 0.502 + ], + "angle": 0, + "content": "2. Consider the Disease:" + }, + { + "type": "text", + "bbox": [ + 0.269, + 0.506, + 0.799, + 0.518 + ], + "angle": 0, + "content": "Observation: Ovarian cancer is a serious and often aggressive disease with limited treatment options." + }, + { + "type": "text", + "bbox": [ + 0.269, + 0.521, + 0.843, + 0.543 + ], + "angle": 0, + "content": "Reasoning: The unmet medical need is high, meaning there's a greater chance for a new drug to be approved if it shows any promise of efficacy." + }, + { + "type": "title", + "bbox": [ + 0.269, + 0.561, + 0.433, + 0.572 + ], + "angle": 0, + "content": "3. Evaluate Potential Toxicity:" + }, + { + "type": "text", + "bbox": [ + 0.269, + 0.575, + 0.795, + 0.587 + ], + "angle": 0, + "content": "Observation: We don't have specific information about the drug's toxicity profile from the given data." + }, + { + "type": "text", + "bbox": [ + 0.269, + 0.589, + 0.841, + 0.612 + ], + "angle": 0, + "content": "Reasoning: Phase 1 trials primarily focus on safety. The drug's chemical structure suggests potential for toxicity, which would need to be carefully assessed in the trial." + }, + { + "type": "title", + "bbox": [ + 0.269, + 0.629, + 0.543, + 0.64 + ], + "angle": 0, + "content": "4. Assess the Likelihood of Demonstrating Safety:" + }, + { + "type": "text", + "bbox": [ + 0.269, + 0.644, + 0.842, + 0.665 + ], + "angle": 0, + "content": "Observation: While the drug's structure raises concerns, there's no information suggesting inherent, severe toxicity that would preclude Phase 1 testing." 
+ }, + { + "type": "text", + "bbox": [ + 0.269, + 0.668, + 0.843, + 0.701 + ], + "angle": 0, + "content": "Reasoning: Most drugs with novel structures undergo preclinical testing before reaching Phase 1. If the drug passed those tests, it likely has a reasonable chance of demonstrating acceptable safety in a small group of patients." + }, + { + "type": "title", + "bbox": [ + 0.269, + 0.718, + 0.407, + 0.73 + ], + "angle": 0, + "content": "5. Weighing the Factors:" + }, + { + "type": "text", + "bbox": [ + 0.269, + 0.733, + 0.834, + 0.745 + ], + "angle": 0, + "content": "Observation: We have a potentially effective drug for a serious disease, but with potential toxicity concerns." + }, + { + "type": "text", + "bbox": [ + 0.269, + 0.748, + 0.843, + 0.78 + ], + "angle": 0, + "content": "Reasoning: Phase 1 trials are designed to assess safety and dosage. Even if the drug has some side effects, it's likely to be approved to proceed to further testing if it shows any signs of efficacy and the benefits outweigh the risks in the context of a serious disease." + }, + { + "type": "text", + "bbox": [ + 0.269, + 0.789, + 0.438, + 0.801 + ], + "angle": 0, + "content": "Answer: (B) would be approved" + }, + { + "type": "text", + "bbox": [ + 0.11, + 0.839, + 0.887, + 0.93 + ], + "angle": 0, + "content": "Figure S.10 | Enabling reasoning with TxGemma-Chat on challenging tasks. Example of prompting TxGemma-27B-Chat to explain its prediction for predicting clinical trial approval, a relatively unintuitive task. A prefix is added before the prompt to provide instructions for reasoning, and a reminder is added at the end so the model correctly specifies the option corresponding to its desired answer. Lastly, the \"Answer\" text is changed to \"Reasoning:\" to enable reasoning steps. The reasoning provided by the model is not comprehensive but can provide useful insights into the drug-disease interaction." 
+ }, + { + "type": "page_number", + "bbox": [ + 0.864, + 0.938, + 0.885, + 0.949 + ], + "angle": 0, + "content": "52" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.27, + 0.107, + 0.722, + 0.282 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.11, + 0.304, + 0.886, + 0.348 + ], + "angle": 0, + "content": "Figure S.11 | Inference speed of TxGemma models. The number of examples inferred per day at different model sizes, normalized by the number of TPUv5e chips used for serving. The PPBR AZ task was used for the benchmarking due to its reasonable size." + }, + { + "type": "image", + "bbox": [ + 0.315, + 0.399, + 0.682, + 0.626 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.312, + 0.636, + 0.681, + 0.8 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.11, + 0.812, + 0.886, + 0.885 + ], + "angle": 0, + "content": "Figure S.12 | Contamination analysis. (top) Out of 66 tasks, \\(23\\%\\) had some datapoints in the test set that were found in the Gemma-2 pretraining data, while \\(77\\%\\) did not. For tasks that had some contaminated datapoints, we plot the percent of the test set that was contaminated. (bottom) Distributions of cosine similarities between SMILES string embeddings and molecular name embeddings. Decoy name embeddings indicate a random different molecule name." + }, + { + "type": "page_number", + "bbox": [ + 0.864, + 0.938, + 0.884, + 0.949 + ], + "angle": 0, + "content": "53" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.175, + 0.138, + 0.555, + 0.361 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.591, + 0.139, + 0.822, + 0.346 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.11, + 0.375, + 0.889, + 0.452 + ], + "angle": 0, + "content": "Figure S.13 | Model performance after filtering contaminated datapoints. 
Performance of TxGemma-27B-Predict on both original unfiltered test sets and filtered test sets in which contaminated datapoints were removed. (left) For these tasks, higher values correspond to better models, and the metrics are defined in Tables S.13 and S.14. Error bars are bootstrapped standard errors. (right) For these tasks, lower values correspond to better models, and the metrics (either MAE or MSE) are defined in Tables S.13 and S.14. Error bars are bootstrapped standard errors." + }, + { + "type": "image", + "bbox": [ + 0.175, + 0.563, + 0.506, + 0.768 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.51, + 0.563, + 0.822, + 0.768 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.11, + 0.78, + 0.889, + 0.857 + ], + "angle": 0, + "content": "Figure S.14 | Breakdown of tool-usage frequency for Chemical Preference dataset and HLE dataset. Agentic-Tx adapts its tool usage to reason effectively about different tasks. For Chemical Preference, which requires evaluating drug candidates, the system correctly invokes tools for molecular characterization and safety assessment, such as SMILES description and toxicity prediction. For the Bio+Med task, focused on complex biomedical questions, the agent prioritizes PubMed and Wikipedia, demonstrating reliance on broad knowledge retrieval and synthesis." + }, + { + "type": "page_number", + "bbox": [ + 0.863, + 0.938, + 0.885, + 0.949 + ], + "angle": 0, + "content": "54" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.118, + 0.093, + 0.885, + 0.257 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.11, + 0.279, + 0.889, + 0.357 + ], + "angle": 0, + "content": "Figure S.15 | Breakdown of tool-usage per question in chemical preference dataset. 
Marker size represents usage count and corresponds to the number of uses per each tool; blue indicates accuracy increase, light red indicates decrease associated with each tool per question. We observe questions involve up to 8 tool calls. High usage of SMILES description and toxicity prediction correlates with improved performance. This demonstrates Agentic-Tx's adaptive tool selection to meet task requirements and improved performance." + }, + { + "type": "image", + "bbox": [ + 0.118, + 0.379, + 0.365, + 0.543 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.377, + 0.379, + 0.622, + 0.542 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.636, + 0.379, + 0.882, + 0.543 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.11, + 0.563, + 0.889, + 0.653 + ], + "angle": 0, + "content": "Figure S.16 | Ability to combine SMILES and text is independent of model size. Median relative change of TxGemma-27B-Predict, TxGemma-9B-Predict and TxGemma-2B-Predict performance from SOTA for tasks grouped by feature type. The signs were reversed for MAE and MSE metrics because lower MAE and MSE values correspond to better performances. The number of tasks in each feature type is displayed over each bar. In all models, over \\(90\\%\\) of tasks had a median relative performance change greater than -0.2, and SMILES + Text consistently outperformed SOTA." + }, + { + "type": "image", + "bbox": [ + 0.119, + 0.678, + 0.462, + 0.838 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.537, + 0.678, + 0.882, + 0.837 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.11, + 0.855, + 0.889, + 0.901 + ], + "angle": 0, + "content": "Figure S.17 | Ablations of model sizes and model adaptations. 
(left) Relative performance changes for pairwise comparisons of TxGemma-Predict models (TxGemma-2B-Predict, TxGemma-9B-Predict, TxGemma-27B-Predict). (right) Relative performance changes of TxGemma models compared to their respective base models." + }, + { + "type": "page_number", + "bbox": [ + 0.863, + 0.938, + 0.885, + 0.949 + ], + "angle": 0, + "content": "55" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.347, + 0.385, + 0.648, + 0.543 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.11, + 0.563, + 0.889, + 0.608 + ], + "angle": 0, + "content": "Figure S.18 | TxGemma predictions show correlations between toxicity and clinical trial approval. Spearman correlation coefficients between toxicity predictions (measured by AMES, DILI, and hERG central) and clinical trial predictions (measured by Phase1, Phase2, and Phase3) on a set of PubChem molecules." + }, + { + "type": "page_number", + "bbox": [ + 0.864, + 0.938, + 0.885, + 0.949 + ], + "angle": 0, + "content": "56" + } + ], + [ + { + "type": "title", + "bbox": [ + 0.122, + 0.091, + 0.23, + 0.106 + ], + "angle": 0, + "content": "References" + }, + { + "type": "ref_text", + "bbox": [ + 0.119, + 0.121, + 0.887, + 0.145 + ], + "angle": 0, + "content": "1. Chen, J., Hu, Y., Wang, Y., Lu, Y., Cao, X., Lin, M., Xu, H., Wu, J., Xiao, C., Sun, J., et al. TrialBench: Multi-modal artificial intelligence-ready clinical trial datasets. arXiv preprint arXiv:2407.00631 (2024)." + }, + { + "type": "ref_text", + "bbox": [ + 0.119, + 0.147, + 0.888, + 0.18 + ], + "angle": 0, + "content": "2. Kuo, K.-T., Mao, T.-L., Jones, S., Veras, E., Ayhan, A., Wang, T.-L., Glas, R., Slamon, D., Velculescu, V. E., Kuman, R. J., et al. Frequent activating mutations of PIK3CA in ovarian clear cell carcinoma. The American journal of pathology 174, 1597-1601 (2009)." + }, + { + "type": "ref_text", + "bbox": [ + 0.119, + 0.183, + 0.887, + 0.207 + ], + "angle": 0, + "content": "3. 
Leontiadou, H., Galdadas, I., Athanasiou, C. & Cournia, Z. Insights into the mechanism of the PIK3CA E545K activating mutation using MD simulations. Scientific reports 8, 15544 (2018)." + }, + { + "type": "ref_text", + "bbox": [ + 0.119, + 0.209, + 0.887, + 0.232 + ], + "angle": 0, + "content": "4. Chen, H., Si, Y., Wen, J., Hu, C., Xia, E., Wang, Y. & Wang, O. P110α inhibitor alpelisib exhibits a synergistic effect with pyrotinib and reverses pyrotinib resistant in HER2+ breast cancer. Neoplasia 43, 100913 (2023)." + }, + { + "type": "ref_text", + "bbox": [ + 0.119, + 0.234, + 0.887, + 0.268 + ], + "angle": 0, + "content": "5. Fritsch, C., Huang, A., Chatenay-Rivauday, C., Schnell, C., Reddy, A., Liu, M., Kauffmann, A., Guthy, D., Erdmann, D., De Pover, A., et al. Characterization of the novel and specific PI3Kα inhibitor NVP-BYL719 and development of the patient stratification strategy for clinical trials. Molecular cancer therapeutics 13, 1117-1129 (2014)." + }, + { + "type": "ref_text", + "bbox": [ + 0.119, + 0.27, + 0.887, + 0.303 + ], + "angle": 0, + "content": "6. Narayan, P., Prowell, T. M., Gao, J. J., Fernandes, L. L., Li, E., Jiang, X., Qiu, J., Fan, J., Song, P., Yu, J., et al. FDA approval summary: alpelisib plus fulvestrant for patients with HR-positive, HER2-negative, PIK3CA-mutated, advanced or metastatic breast cancer. Clinical Cancer Research 27, 1842-1849 (2021)." + }, + { + "type": "ref_text", + "bbox": [ + 0.119, + 0.305, + 0.887, + 0.34 + ], + "angle": 0, + "content": "7. Passarelli, A., Carbone, V., Pignata, S., Mazzeo, R., Lorusso, D., Scambia, G., Canova, S., Di Palma, T., Tasca, G., Mantiero, M., et al. Alpelisib for PIK3CA-mutated advanced gynecological cancers: first clues of clinical activity. *Gynecologic Oncology* 183, 61-67 (2024)." + }, + { + "type": "ref_text", + "bbox": [ + 0.119, + 0.342, + 0.887, + 0.366 + ], + "angle": 0, + "content": "8. Thibault, B., Thole, A., D'Angelo, R., Basset, C. & Guillermet-Guibert, J. 
PI3Kα-specific inhibitor BYL-719 synergizes with cisplatin in vitro in PIK3CA-mutated ovarian cancer cells. Scientific Reports 15, 6265 (2025)." + }, + { + "type": "ref_text", + "bbox": [ + 0.119, + 0.368, + 0.887, + 0.401 + ], + "angle": 0, + "content": "9. Hu, X., Xia, M., Wang, J., Yu, H., Chai, J., Zhang, Z., Sun, Y., Su, J. & Sun, L. Dual PI3K/mTOR inhibitor PKI-402 suppresses the growth of ovarian cancer cells by degradation of Mcl-1 through autophagy. Biomedicine & Pharmacotherapy 129, 110397 (2020)." + }, + { + "type": "ref_text", + "bbox": [ + 0.113, + 0.403, + 0.887, + 0.427 + ], + "angle": 0, + "content": "10. Turon, G., Hlozek, J., Woodland, J. G., Kumar, A., Chibale, K. & Duran-Frigola, M. First fully-automated AI/ML virtual screening cascade implemented at a drug discovery centre in Africa. Nature Communications 14, 5736 (2023)." + }, + { + "type": "ref_text", + "bbox": [ + 0.113, + 0.429, + 0.887, + 0.451 + ], + "angle": 0, + "content": "11. Fontenot, R., Kathad, U., McDermott, J., Sturtevant, D., Sharma, P. & Carr, P. Predicting a Compounds Blood-Brain-Barrier Permeability with Lantern Pharma's AI and ML Platform, RADR 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.113, + 0.453, + 0.694, + 0.467 + ], + "angle": 0, + "content": "12. Bera, S., Dent, J., Gill, G., Stolman, A. & Wu, B. SimGCN for TDC Benchmarks (2022)." + }, + { + "type": "ref_text", + "bbox": [ + 0.113, + 0.47, + 0.887, + 0.494 + ], + "angle": 0, + "content": "13. Plonka, W., Stork, C., Šićho, M. & Kirchmair, J. CYPlebrity: Machine learning models for the prediction of inhibitors of cytochrome P450 enzymes. Bioorganic & medicinal chemistry 46, 116388 (2021)." + }, + { + "type": "ref_text", + "bbox": [ + 0.113, + 0.496, + 0.887, + 0.519 + ], + "angle": 0, + "content": "14. Hu, W., Liu, B., Gomes, J., Zitnik, M., Liang, P., Pande, V. & Leskovec, J. Strategies for pre-training graph neural networks. arXiv preprint arXiv:1905.12265 (2019)." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.113, + 0.521, + 0.887, + 0.544 + ], + "angle": 0, + "content": "15. Huang, K., Fu, T., Glass, L. M., Zitnik, M., Xiao, C. & Sun, J. DeepPurpose: a deep learning library for drug-target interaction prediction. Bioinformatics 36, 5545-5547 (2020)." + }, + { + "type": "ref_text", + "bbox": [ + 0.113, + 0.546, + 0.887, + 0.569 + ], + "angle": 0, + "content": "16. Lagunin, A., Filimonov, D., Zakharov, A., Xie, W., Huang, Y., Zhu, F., Shen, T., Yao, J. & Poroikov, V. Computer-aided prediction of rodent carcinogenicity by PASS and CISOC-PSCT. QSAR & Combinatorial Science 28, 806-810 (2009)." + }, + { + "type": "ref_text", + "bbox": [ + 0.113, + 0.571, + 0.887, + 0.595 + ], + "angle": 0, + "content": "17. Li, P., Li, Y., Hsieh, C.-Y., Zhang, S., Liu, X., Liu, H., Song, S. & Yao, X. TrimNet: learning molecular representation from triplet messages for biomedicine. Briefings in Bioinformatics 22, bbaa266 (2021)." + }, + { + "type": "ref_text", + "bbox": [ + 0.113, + 0.597, + 0.887, + 0.62 + ], + "angle": 0, + "content": "18. Huang, D., Chowdhuri, S. R., Li, A., Li, A., Agrawal, A., Gano, K. & Zhu, A. A Unified System for Molecular Property Predictions: Oloren ChemEngine and its Applications (2022)." + }, + { + "type": "ref_text", + "bbox": [ + 0.113, + 0.622, + 0.887, + 0.635 + ], + "angle": 0, + "content": "19. Li, J., Cai, D. & He, X. Learning graph-level representation for drug discovery. arXiv preprint arXiv:1709.03741 (2017)." + }, + { + "type": "ref_text", + "bbox": [ + 0.113, + 0.637, + 0.887, + 0.66 + ], + "angle": 0, + "content": "20. Raimondi, D., Simm, J., Arany, A. & Moreau, Y. A novel method for data fusion over entity-relation graphs and its application to protein-protein interaction prediction. Bioinformatics 37, 2275-2281 (2021)." + }, + { + "type": "ref_text", + "bbox": [ + 0.113, + 0.662, + 0.887, + 0.695 + ], + "angle": 0, + "content": "21. 
Gfeller, D., Schmidt, J., Croce, G., Guillaume, P., Bobisse, S., Genolet, R., Queiroz, L., Cesbron, J., Racle, J. & Harari, A. Improved predictions of antigen presentation and TCR recognition with MixMHCpred2. 2 and PRIME2. 0 reveal potent SARS-CoV-2 CD8+ T-cell epitopes. Cell Systems 14, 72-83 (2023)." + }, + { + "type": "ref_text", + "bbox": [ + 0.113, + 0.698, + 0.887, + 0.731 + ], + "angle": 0, + "content": "22. Motmaen, A., Dauparas, J., Baek, M., Abedi, M. H., Baker, D. & Bradley, P. Peptide-binding specificity prediction using fine-tuned protein structure prediction networks. Proceedings of the National Academy of Sciences 120, e2216697120 (2023)." + }, + { + "type": "ref_text", + "bbox": [ + 0.113, + 0.734, + 0.887, + 0.757 + ], + "angle": 0, + "content": "23. Siramshetty, V., Williams, J., Nguyen, D., Neyra, J., Southall, N., Mathé, E., Xu, X. & Shah, P. Validating ADME QSAR models using marketed drugs. SLAS DISCOVERY: Advancing the Science of Drug Discovery 26, 1326-1336 (2021)." + }, + { + "type": "ref_text", + "bbox": [ + 0.113, + 0.759, + 0.887, + 0.782 + ], + "angle": 0, + "content": "24. Haneczok, J. & Delijewski, M. Machine learning enabled identification of potential SARS-CoV-2 3CLpro inhibitors based on fixed molecular fingerprints and Graph-CNN neural representations. Journal of Biomedical Informatics 119, 103821 (2021)." + }, + { + "type": "ref_text", + "bbox": [ + 0.113, + 0.785, + 0.887, + 0.808 + ], + "angle": 0, + "content": "25. Liu, Y., Wu, Y., Shen, X. & Xie, L. COVID-19 multi-targeted drug repurposing using few-shot learning. Frontiers in Bioinformatics 1, 693177 (2021)." + }, + { + "type": "ref_text", + "bbox": [ + 0.113, + 0.81, + 0.887, + 0.833 + ], + "angle": 0, + "content": "26. Chen, X., Dougherty, T., Hong, C., Schibler, R., Zhao, Y. C., Sadeghi, R., Matasci, N., Wu, Y.-C. & Kerman, I. Predicting antibody developability from sequence using machine learning. *biorxiv*, 2020-06 (2020)." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.113, + 0.835, + 0.887, + 0.869 + ], + "angle": 0, + "content": "27. Alves, V. M., Muratov, E., Fourches, D., Strickland, J., Kleinstreuer, N., Andrade, C. H. & Tropsha, A. Predicting chemically-induced skin reactions. Part I: QSAR models of skin sensitization and their application to identify potentially hazardous compounds. Toxicology and applied pharmacology 284, 262-272 (2015)." + }, + { + "type": "ref_text", + "bbox": [ + 0.113, + 0.871, + 0.887, + 0.895 + ], + "angle": 0, + "content": "28. Shermukhamedov, S., Mamurjonova, D. & Probst, M. Structure to Property: Chemical Element Embeddings and a Deep Learning Approach for Accurate Prediction of Chemical Properties. arXiv preprint arXiv:2309.09355 (2023)." + }, + { + "type": "list", + "bbox": [ + 0.113, + 0.121, + 0.888, + 0.895 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.863, + 0.938, + 0.884, + 0.949 + ], + "angle": 0, + "content": "57" + } + ], + [ + { + "type": "ref_text", + "bbox": [ + 0.111, + 0.092, + 0.888, + 0.116 + ], + "angle": 0, + "content": "29. Vu, O., Mendenhall, J., Altarawy, D. & Meiler, J. BCL.: Mol2D—a robust atom environment descriptor for QSAR modeling and lead optimization. Journal of computer-aided molecular design 33, 477–486 (2019)." + }, + { + "type": "ref_text", + "bbox": [ + 0.111, + 0.117, + 0.888, + 0.142 + ], + "angle": 0, + "content": "30. Karim, A., Lee, M., Balle, T. & Sattar, A. CardioTox net: a robust predictor for hERG channel blockade based on deep learning meta-feature ensembles. Journal of Cheminformatics 13, 1-13 (2021)." + }, + { + "type": "ref_text", + "bbox": [ + 0.111, + 0.143, + 0.887, + 0.166 + ], + "angle": 0, + "content": "31. Korotcov, A., Tkachenko, V., Russo, D. P. & Ekins, S. Comparison of deep learning with multiple machine learning methods and metrics using diverse drug discovery data sets. Molecular pharmaceutics 14, 4462-4475 (2017)." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.111, + 0.168, + 0.887, + 0.192 + ], + "angle": 0, + "content": "32. Wong, L., You, Z.-H., Guo, Z.-H., Yi, H.-C., Chen, Z.-H. & Cao, M.-Y. MIPDH: a novel computational model for predicting microRNA-mRNA interactions by DeepWalk on a heterogeneous network. ACS omega 5, 17022-17032 (2020)." + }, + { + "type": "ref_text", + "bbox": [ + 0.111, + 0.193, + 0.887, + 0.217 + ], + "angle": 0, + "content": "33. Fu, T., Huang, K., Xiao, C., Glass, L. M. & Sun, J. Hint: Hierarchical interaction network for clinical-trial-outcome predictions. *Patterns* 3 (2022)." + }, + { + "type": "ref_text", + "bbox": [ + 0.111, + 0.218, + 0.887, + 0.242 + ], + "angle": 0, + "content": "34. Weber, A., Born, J. & Rodriguez Martínez, M. TITAN: T-cell receptor specificity prediction with bimodal attention networks. Bioinformatics 37, i237-i244 (2021)." + }, + { + "type": "ref_text", + "bbox": [ + 0.111, + 0.243, + 0.887, + 0.279 + ], + "angle": 0, + "content": "35. Lam, H. T., Sbodio, M. L., Galindo, M. M., Zayats, M., Fernandez-Diaz, R., Valls, V., Picco, G., Ramis, C. B. & Lopez, V. Otter-Knowledge: benchmarks of multimodal knowledge graph representation learning from different sources for drug discovery. arXiv preprint arXiv:2306.12802 (2023)." + }, + { + "type": "ref_text", + "bbox": [ + 0.111, + 0.28, + 0.887, + 0.314 + ], + "angle": 0, + "content": "36. Kinnings, S. L., Liu, N., Tonge, P. J., Jackson, R. M., Xie, L. & Bourne, P. E. A machine learning-based method to improve docking scoring functions and its application to drug repurposing. Journal of chemical information and modeling 51, 408-419 (2011)." + }, + { + "type": "ref_text", + "bbox": [ + 0.111, + 0.315, + 0.887, + 0.34 + ], + "angle": 0, + "content": "37. Kalemati, M., Zamani Emani, M. & Koohi, S. BiComp-DTA: Drug-target binding affinity prediction through complementary biological-related and compression-based featurization approach. PLOS Computational Biology 19, e1011036 (2023)." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.111, + 0.341, + 0.872, + 0.355 + ], + "angle": 0, + "content": "38. Wei, B. & Gong, X. DeepPLA: a novel deep learning-based model for protein-ligand binding affinity prediction (2021)." + }, + { + "type": "ref_text", + "bbox": [ + 0.111, + 0.356, + 0.887, + 0.38 + ], + "angle": 0, + "content": "39. Probst, D., Schwaller, P. & Reymond, J.-L. Reaction classification and yield prediction using the differential reaction fingerprint DRFP. Digital discovery 1, 91-97 (2022)." + }, + { + "type": "ref_text", + "bbox": [ + 0.111, + 0.381, + 0.887, + 0.405 + ], + "angle": 0, + "content": "40. Rivera, Z. A., Tayo, L., Chen, B.-Y. & Tsai, P.-W. In silico Evaluation of the Feasibility of Magnolia officinalis Electronshutting Compounds as Parkinson's Disease Remedy. Letters in Drug Design & Discovery 21, 3039-3048 (2024)." + }, + { + "type": "ref_text", + "bbox": [ + 0.111, + 0.406, + 0.887, + 0.43 + ], + "angle": 0, + "content": "41. Pei, Q., Wu, L., Zhu, J., Xia, Y., Xie, S., Qin, T., Liu, H., Liu, T.-Y. & Yan, R. Breaking the barriers of data scarcity in drug-target affinity prediction. Briefings in Bioinformatics 24, bbad386 (2023)." + }, + { + "type": "ref_text", + "bbox": [ + 0.111, + 0.431, + 0.887, + 0.466 + ], + "angle": 0, + "content": "42. Xia, F., Shukla, M., Brettin, T., Garcia-Cardona, C., Cohn, J., Allen, J. E., Maslov, S., Holbeck, S. L., Doroshow, J. H., Evrard, Y. A., et al. Predicting tumor cell line response to drug pairs with deep learning. BMC bioinformatics 19, 71-79 (2018)." + }, + { + "type": "ref_text", + "bbox": [ + 0.111, + 0.468, + 0.887, + 0.492 + ], + "angle": 0, + "content": "43. Lind, A. P. & Anderson, P. C. Predicting drug activity against cancer cells by random forest models based on minimal genomic information and chemical properties. *PloS one* 14, e0219774 (2019)." + }, + { + "type": "ref_text", + "bbox": [ + 0.111, + 0.493, + 0.486, + 0.506 + ], + "angle": 0, + "content": "44. Euclidia. 
https://github.com/euclidia/public-models. 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.111, + 0.507, + 0.887, + 0.541 + ], + "angle": 0, + "content": "45. Leenay, R. T., Aghazadeh, A., Hiatt, J., Tse, D., Roth, T. L., Apathy, R., Shifrut, E., Hultquist, J. F., Krogan, N., Wu, Z., et al. Large dataset enables prediction of repair after CRISPR-Cas9 editing in primary T cells. Nature biotechnology 37, 1034-1037 (2019)." + }, + { + "type": "ref_text", + "bbox": [ + 0.111, + 0.543, + 0.887, + 0.578 + ], + "angle": 0, + "content": "46. Yang, K., Swanson, K., Jin, W., Coley, C., Eiden, P., Gao, H., Guzman-Perez, A., Hopper, T., Kelley, B., Mathea, M., et al. Analyzing learned molecular representations for property prediction. Journal of chemical information and modeling 59, 3370-3388 (2019)." + }, + { + "type": "ref_text", + "bbox": [ + 0.111, + 0.58, + 0.887, + 0.604 + ], + "angle": 0, + "content": "47. Preuer, K., Lewis, R. P., Hochreiter, S., Bender, A., Bulusu, K. C. & Klambauer, G. DeepSynergy: predicting anti-cancer drug synergy with Deep Learning. Bioinformatics 34, 1538-1546 (2018)." + }, + { + "type": "ref_text", + "bbox": [ + 0.111, + 0.605, + 0.887, + 0.629 + ], + "angle": 0, + "content": "48. Zheng, S., Rao, J., Zhang, Z., Xu, J. & Yang, Y. Predicting retrosynthetic reactions using self-corrected transformer neural networks. Journal of chemical information and modeling 60, 47-55 (2019)." + }, + { + "type": "ref_text", + "bbox": [ + 0.111, + 0.63, + 0.887, + 0.654 + ], + "angle": 0, + "content": "49. Boral, N., Ghosh, P., Goswami, A. & Bhattacharyya, M. Accountable prediction of drug ADMET Properties with molecular descriptors. bioRxiv, 2022-06 (2022)." + }, + { + "type": "ref_text", + "bbox": [ + 0.111, + 0.655, + 0.887, + 0.68 + ], + "angle": 0, + "content": "50. Hendrycks, D., Burns, C., Basart, S., Zou, A., Mazeika, M., Song, D. & Steinhardt, J. Measuring massive multitask language understanding. arXiv preprint arXiv:2009.03300 (2020)." 
+ }, + { + "type": "list", + "bbox": [ + 0.111, + 0.092, + 0.888, + 0.68 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.864, + 0.938, + 0.884, + 0.949 + ], + "angle": 0, + "content": "58" + } + ] +] \ No newline at end of file diff --git a/data/2025/2504_06xxx/2504.06196/00c24729-2bee-4726-a0b4-13a163fe9cf3_origin.pdf b/data/2025/2504_06xxx/2504.06196/00c24729-2bee-4726-a0b4-13a163fe9cf3_origin.pdf new file mode 100644 index 0000000000000000000000000000000000000000..e8abae9e5863dfd512922a865411e3cbebe39e3d --- /dev/null +++ b/data/2025/2504_06xxx/2504.06196/00c24729-2bee-4726-a0b4-13a163fe9cf3_origin.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2b60b971ba28b54fa41629812a716f1eb2e50673a2b3ddf628869de97db74ff5 +size 2977240 diff --git a/data/2025/2504_06xxx/2504.06196/full.md b/data/2025/2504_06xxx/2504.06196/full.md new file mode 100644 index 0000000000000000000000000000000000000000..540009785ef9973736ef08549fc0632b8c3937ec --- /dev/null +++ b/data/2025/2504_06xxx/2504.06196/full.md @@ -0,0 +1,1140 @@ +# TxGemma: + +# Efficient and Agentic LLMs for Therapeutics + +Eric Wang*,†,1, Samuel Schmidgall*,1, Paul F. Jaeger1, Fan Zhang2, Rory Pilgrim2, Yossi Matias2, Joelle Barral1, David Fleet1 and Shekoofeh Azizi†,1 + +$^{1}$ Google DeepMind, $^{2}$ Google Research + +Therapeutic development is a costly and high-risk endeavor that is often plagued by high failure rates. To address this, we introduce TxGemma, a suite of efficient, generalist large language models (LLMs) capable of therapeutic property prediction as well as interactive reasoning and explainability. Unlike task-specific models, TxGemma synthesizes information from diverse sources, enabling broad application across the therapeutic development pipeline. The suite includes 2B, 9B, and 27B parameter models, fine-tuned from Gemma-2 on a comprehensive dataset of small molecules, proteins, nucleic acids, diseases, and cell lines. 
Across 66 therapeutic development tasks, TxGemma achieved superior or comparable performance to the state-of-the-art generalist model on 64 (superior on 45), and against state-of-the-art specialist models on 50 (superior on 26). Fine-tuning TxGemma models on therapeutic downstream tasks, such as clinical trial adverse event prediction, requires less training data than fine-tuning base LLMs, making TxGemma suitable for data-limited applications. Beyond these predictive capabilities, TxGemma features conversational models that bridge the gap between general LLMs and specialized property predictors. These allow scientists to interact in natural language, provide mechanistic reasoning for predictions based on molecular structure, and engage in scientific discussions. Building on this, we further introduce Agentic-Tx, a generalist therapeutic agentic system powered by Gemini 2.5 that reasons, acts, manages diverse workflows, and acquires external domain knowledge. Agentic-Tx surpasses prior leading models on the Humanity's Last Exam benchmark (Chemistry & Biology) with $52.3\%$ relative improvement over o3-mini (high) and $26.7\%$ over o3-mini (high) on GPQA (Chemistry). On ChemBench, TxGemma excels with improvements of $6.3\%$ (ChemBench-Preference) and $2.4\%$ (ChemBench-Mini) over o3-mini (high), as well as $17.7\%$ and $5.6\%$ over o1, respectively. TxGemma's collection is released as open models, enabling researchers to adapt and validate it on their own diverse datasets, thus facilitating more challenging real-world applications. + +# 1 Introduction + +The pharmaceutical industry faces significant challenges in bringing new therapeutics to market. High attrition rates and lengthy, costly development timelines [3, 4] necessitate innovative approaches to therapeutic development. 
Success requires a drug candidate to not only demonstrate efficacy but also possess favorable safety, metabolic stability, pharmacokinetic/pharmacodynamic properties and developability, among other characteristics. Determining these diverse characteristics often relies on a large array of complex and expensive experimental procedures, highlighting the need for more efficient methods. + +Computational approaches, such as machine learning, are emerging as powerful tools to address these challenges. Leveraging predictive models trained on curated datasets allows researchers to prioritize promising candidates early in the development process, reducing reliance on costly experimental assays [5]. Publicly available databases of molecular properties and biological activity are crucial for training and validating these models. In this area, a major development was the curation of the Therapeutics Data Commons (TDC) [6, 7, 8], which contains datasets and benchmarks for many different tasks throughout the therapeutic development pipeline, ranging from early-stage target identification to late-stage clinical trial approval. + +Recent advancements in large language models (LLMs) offer a compelling opportunity to leverage available datasets and address limitations in the therapeutic development process. LLMs have demonstrated the capacity to integrate and learn from diverse data sources across various domains, including scientific applications [9, 10, + +![](images/0203c8f019b173fb8d62c4ee904f1f418162cc6b09d58945c721893ac5ac6ad4.jpg) + +![](images/6c5ae632ad262260986f1fe3efcfbe3a474c92834958a1bd7c97690782bbf10d.jpg) + +![](images/778634aa5c8b1460d1e1005fbb3e9371ca11ba143cab37feb55f39531fd8284d.jpg) + +![](images/0114d9a00ab47d79177889b7773fe64b8b18a5fc2fdf408061a2db63ed40fe38.jpg) +Figure 1 | Overview of TxGemma. (top) All TxGemma variants are trained on diverse data sources of the Therapeutic Data Commons (TDC). 
TxGemma-Predict comes in three size variants (2B, 9B, and 27B) and is trained for high-performance predictions on a broad set of therapeutic development tasks. TxGemma-Chat features two variants (9B and 27B) and is trained on a combination of TDC data with general Gemma-2 instruction tuning data to retain conversational and reasoning capabilities. Agentic-Tx, a therapeutics-focused agentic system powered by Gemini 2.5, has access to 18 tools including TxGemma-Predict and TxGemma-Chat to collect external knowledge and manages complex tasks in either autonomous or interactive settings. (bottom-right) Absolute performance of Agentic-Tx compared to best-in-class models on three complex therapeutic-related reasoning benchmarks. The state-of-the-art (SOTA) values are obtained from [1, 2] and details are listed in Table 3. Dashed lines: L=lowest, M=mean, H=highest human scores. (bottom-left) Relative performance changes of TxGemma-Predict compared to the SOTA generalist model for each task type. The assignment of the 66 evaluated TDC tasks to task types is shown in Tables S.2 and S.3. The bottom bar chart shows a summary of results where TxGemma-Predict outperforms or nearly matches SOTA (light blue), and outperforms SOTA (darker blue). + +![](images/edd756cd1d85fd738f6f39022b8b79d554ff5a528cce9f961383a0bd3ccf1307.jpg) + +![](images/45bd5bb59ecbb59ebf3c3bd02e1a6e2b8cdf58c01e28987f97b63a2162234f73.jpg) + +![](images/fab9bc74db450dc8de75df386d1ccaa290832005e9a1245e9ad9844556fd03a3.jpg) + +![](images/6c7190df8fb3e455a4678103a2125ca56783f3046dad39ad10081da4aeb70ed1.jpg) + +11]. Their potential to connect disparate aspects of drug development, such as chemical structure, biological activity, and clinical trial outcomes, is particularly exciting. In this context, we have previously introduced Tx-LLM, a LLM fine-tuned from a collection of question-answer instruction-tuning datasets based on TDC [12]. 
While promising, the model's lack of conversational capabilities prevented reasoning or user interaction, limiting its value for scientists who require a model that can understand complex queries and engage in nuanced discussions. + +In this work, we introduce TxGemma, a suite of efficient, generalist LLMs trained for therapeutics. Building on, but significantly extending, our previous work [12], TxGemma leverages LLMs to synthesize information from diverse sources. The suite includes 2B, 9B, and 27B parameter models, fine-tuned from Gemma-2 [13, 14] using a collection of therapeutic instruction-tuning datasets encompassing small molecules, proteins, nucleic acids, diseases, and cell lines. For the first time in therapeutic AI, TxGemma features conversational counterparts capable of reasoning and explanation, moving beyond black-box predictions to facilitate mechanistic understanding and scientific discussions. Our key contributions are as follows: + +- Efficient Generalist Therapeutic LLMs: TxGemma represents a potential shift from task-specific AI to efficient generalist models in therapeutic development. These efficient LLMs (2B-27B parameters) offer a competitive alternative to specialized models, achieving strong performance across a broad range of predictive and generative tasks. Out of 66 therapeutic development tasks curated by TDC, TxGemma-Predict outperforms or nearly matches the state-of-the-art generalist model on 64 (outperforms on 45) and state-of-the-art specialist models on 50 (outperforms on 26). Additionally, fine-tuning TxGemma models on clinical trial adverse event prediction requires less data to achieve strong performance compared to base Gemma-2 models, an important advantage for data-limited fields. +- Explainable and Interactive Therapeutic Models: TxGemma-Chat introduces reasoning and explanation capabilities, bridging the gap between general LLMs and specialized property predictors. 
Scientists can interact with TxGemma-Chat using natural language, explore complex questions, receive explanations for predictions (e.g., based on molecular structure), and engage in scientific discussions. +- Agentic Orchestration of Therapeutic Development Workflows: We further introduce Agentic-Tx, a therapeutics-focused agentic system powered by Gemini 2.5, demonstrating how TxGemma models can be integrated as tools. Equipped with 18 tools, Agentic-Tx solves complex, multi-step problems, achieving state-of-the-art results on reasoning-intensive chemistry and biology benchmarks, including Humanity's Last Exam [15] and ChemBench [1]. +- Enabling Innovative Research with Open Models: Understanding the prevalence of proprietary data in therapeutic research, we release TxGemma models trained only on datasets with commercial licenses as open models to empower researchers to adapt and refine them on their own data. This facilitates validation and potential performance improvements tailored to their specific research needs, paving the way for therapy safety and efficacy in more challenging real-world therapeutic applications. + +# 2 Methods + +# 2.1 Data + +Therapeutic Data Commons (TDC) We leverage the Therapeutic Data Commons (TDC) [7, 6], a comprehensive collection of 66 AI-ready datasets spanning the drug discovery and development pipeline. TDC includes over 15 million datapoints across various biomedical entities and encompasses single-instance prediction, multi-instance prediction, and generation tasks [7]. We focus on TDC tasks relevant to drug discovery, incorporating diverse therapeutic representations: SMILES strings (small molecules), amino acid sequences (proteins and peptides, including specialized representations for MHC molecules and T-cell receptors), nucleotide sequences (nucleic acids), and natural language text (disease/cell line names) (see Table S.6 for examples). Many tasks combine multiple representations. 
(See Table S.1 for task inclusion criteria and Tables S.7 and S.8 for biological contexts of certain tasks.) + +Therapeutic Instruction-Tuning Following Chaves et al. [12], we transform the raw TDC data into an instruction-tuning format suitable for LLMs. Each data point is formatted as a prompt: + +- Instruction: Briefly describes the task. +- Context: Provides 2-3 sentences of relevant biochemical background, derived from TDC descriptions and literature. +- Question: Queries a specific therapeutic property, incorporating textual representations of therapeutics and/or targets (e.g., "Does the following molecule cross the blood-brain barrier? "). +- Answer: Formatted as (A)/(B) for binary classification, a binned continuous value for regression, or a SMILES string for generation. + +This process yields 7,080,338 training, 956,575 validation, and 1,917,297 test data points (Figure S.1, Tables S.2 and S.3). Data splits closely follow TDC's recommended methodologies (random, scaffold, cold-start, combination, temporal) (Table S.2, Table S.3). Detailed task descriptions are in Tables S.4 and S.5. + +We employ a few-shot prompting strategy to promote in-context learning [16], using a blend of $70\%$ zero-shot and $30\%$ few-shot prompts [17, 12]. For few-shot prompts, we randomly sample examples from the training set (Table S.9), as intra-training set similarity is higher than training-test set similarity (Figure S.2). The number of examples is uniformly selected between 1 and 10 so that few-shot prompting is robust to the number of examples during evaluation. + +# 2.2 Modeling + +Base LLM. TxGemma is built upon the Gemma-2 [14] family of lightweight, state-of-the-art open LLMs. Gemma-2 models utilize a decoder-only transformer architecture, incorporating architectural modifications such as interleaved local-global attention and group-query attention, and are trained using Gemini technology [18]. We utilize Gemma-2 base models at 2B, 9B, and 27B parameters. 
2B and 9B Gemma-2 models were initially trained via knowledge distillation [14]. + +Predictive Model Fine-Tuning. We fine-tune the 2B, 9B, and 27B Gemma-2 base models on the therapeutic instruction-tuning data derived from TDC, creating TxGemma-2B-Predict, TxGemma-9B-Predict, and TxGemma-27B-Predict, respectively. Training was performed across all TDC tasks, with mixture ratios proportional to the number of training data points (see Tables S.2 and S.3 for data distribution). This encompassed all approximately 7 million training examples, comprising 3.3 million from regression/generation and 3.7 million from binary classification tasks. Fine-tuning proceeded for 67B tokens (12 epochs) using 256 TPUv4 chips with 8-way data replication, 4-way sequence sharding, and 4-way model sharding. In this work, "TxGemma" generally refers to the generalist, predictive TxGemma-27B-Predict. + +Conversational Model Fine-Tuning. We also trained conversational counterparts, TxGemma-9B-Chat and TxGemma-27B-Chat, by supplementing the therapeutic instruction-tuning data with general instruction-tuning data, as detailed in the Gemma-2 report [14]. The training data mixture comprised $30\%$ therapeutic data and $70\%$ general instruction-tuning data. Conversational models were trained using the same number of tokens and TPU configuration as the predictive models. + +# 2.3 Evaluating Predictive Performance + +Prompting strategy For test set evaluations, we use 10-shot prompting, selecting exemplars from the nearest neighbors within the combined training and validation set (not the test set), as detailed in Table S.9. Nearest neighbors were determined using different methods based on molecule type. For small molecules, we used RDKit [19] to generate Morgan fingerprints (radius 2 and size 2048), representing molecular substructures as binary vectors. Subsequently, we used Chemfp [20] to compute Tanimoto similarities, which quantify fingerprint overlap. 
For amino acid and nucleotide sequences, nearest neighbors were defined by percent sequence identity, determined through multiple sequence alignments performed with Clustal Omega [21]. + +Performance Metrics and Statistical Tests We assess performance using the preferred metrics for each task, as defined by TDC [7] and used by previous models. Binary classification tasks are assessed with area under the receiver operating characteristic curve (AUROC), area under the precision-recall curve (AUPRC), and accuracy. Regression tasks use Spearman's and Pearson correlation coefficients, mean absolute error (MAE), and mean squared error (MSE). The USPTO generation task uses "set accuracy," scoring 1 for perfect overlap between predicted and true reactant sets, and 0 otherwise. Bootstrapped metrics are calculated + +Prompt: Imagine an early virtual screening campaign setting. Which of the following two candidates would you prefer for further development? + + + + + +Agent $\rightarrow$ TxGemma-ClinTox: Is the following toxic? + + + +TxGemma ClinTox: Toxic + +Agent $\rightarrow$ TxGemma-ClinTox: Is the following toxic? + + + +TxGemma ClinTox: Non-toxic + +![](images/a3c869088761c5ea30145738fb0e7de8fb845000d423b65d65ef9ab5098f21ca.jpg) +Figure 2 | Example workflow of agentic planning and execution with Agentic-Tx. Agentic-Tx uses the ReAct framework [22] to interleave thought with tool-usage. When a user poses a query, Agentic-Tx checks whether the query structure matches any defined tool trigger. If so, the query is routed to the corresponding tool, which (i) parses the request, (ii) invokes specialized logic, and (iii) returns a structured answer to the agent. The agent then composes a user-facing response. This adaptive tool-use mechanism is especially helpful for tasks that require external references, chemical data transformations, or precise chemical information, areas where self-contained LLMs often hallucinate. 
In the displayed example, Agentic-Tx uses two tools to solve a complex therapeutic task: TxGemma-Chat and the clinical toxicity prediction tool based on TxGemma-Predict. + +Agent $\rightarrow$ TxGemma-Chat: Given two drug candidates, what factors would influence your decision to prioritize one over the other in an early virtual screening campaign? + +TxGemma-Chat: Investigate whether the drugs would pass through a clinical trial, based on properties such as toxicity. + +Agent: Final answer: + +Natural Language Input + +Reason + Action + +Tool-use + +Final Answer + +using 1000 samples. To compare overall performance between two models across all TDC tasks, we use the non-parametric Wilcoxon signed-rank test and report the corresponding p-value (details in Appendix C.1). + +# 2.4 Agentic System + +One limitation of LLMs for discovery is that, while their prediction capabilities are powerful, they do not have access to up-to-date external knowledge, such as research articles or domain-specific prediction models. These knowledge cut-offs prevent the model from answering questions outside of its training scope. Additionally, some questions involve multiple reasoning steps to solve, for example, the question "What structural modifications could improve the potency of the given drug?" requires iteratively searching the drug's structural space and then prompting TxGemma to predict potency. + +Agentic-Tx, our therapeutics-focused agentic system powered by Gemini 2.5 [18], extends TxGemma's capabilities by orchestrating such complex workflows. Agentic-Tx employs a modular, tool-usage paradigm, in contrast to TxGemma's direct generation of solutions. + +Reasoning and Action Framework Agentic-Tx utilizes the ReAct framework [22], allowing it to interleave reasoning steps ("thoughts") with actions (tool use). The agentic system receives a task or question and iteratively takes actions based on its current context. 
Each action typically involves using a tool, which + +returns an observation. Key to ReAct is this iterative process of observing, reasoning, and acting, allowing Agentic-Tx to dynamically adjust its approach based on the information it gathers. Because tools may return large outputs, we summarize these observations in order to maintain a concise and relevant context. This iterative process of observing, reasoning, acting, and updating its context allows Agentic-Tx to dynamically adjust its approach and gather the necessary information to answer the initial query. Finally, Agentic-Tx integrates the gathered information and formulates a user-friendly response. + +Agentic Tools Agentic-Tx is equipped with 18 tools across four categories (detailed tool descriptions are in Table S.12). They can be broadly categorized as: + +1. TxGemma-based Tools: These provide access to TxGemma's capabilities. The Chat tool enables interaction with TxGemma-27B-Chat. The ClinicalTox and ToxCast tools utilize TxGemma-27B-Predict for toxicity predictions. $IC_{50}$ returns the predicted normalized $IC_{50}$ between a drug and protein, the Mutagenicity tool predicts drug mutagenicity, and the Phase1 Trial tool predicts whether a drug would pass a Phase 1 clinical trial. +2. General Tools: These query external knowledge resources, including PubMed, Wikipedia, and the web. +3. Molecule Tools: These leverage domain-specific libraries for tasks such as retrieving molecular descriptors (e.g., from PubChem) and performing chemical structure conversions. +4. Gene & Protein Tools: These leverage domain-specific libraries for tasks involving genes or proteins, such as retrieving gene descriptions and protein descriptions (e.g., from the NCBI Gene database). 
+ +# 3 Results + +# 3.1 TxGemma Predictive Performance + +# 3.1.1 Comparison with best-in-class therapeutic models + +To provide a comprehensive evaluation of our models' predictive capabilities, we benchmark against both specialist and generalist baselines. For specialist comparisons, we define best-in-class performance metrics for each task using previous models. Specifically, we utilize TDC leaderboard scores for tasks where available (ADMET, DrugCombo, DTI DG). For remaining tasks, values are reported from a literature review and are detailed in Tables S.13 and S.14. These specialist performance values align with those reported in Chaves et al. [12]. Additionally, we compare our models against three prominent therapeutic generalist and multi-task models: Tx-LLM [12], LlaSMol [23], and MolE [24]. Tx-LLM, with its two size-variants S and M, shares similar training data to our approach enabling a direct comparison across all tasks. LlaSMol a suite of generalist models built upon fine-tuned open-source LLMs trained for small-molecule applications [23]. Similarly, MolE was developed as a graph-based multi-task foundation model for small molecules. LlaSMol and MolE, specialized for small molecules, offer strong baselines for small molecule tasks. + +TxGemma shows improved performance compared to therapeutic generalist models In Figure 3, we compare the performance of TxGemma-27B-Predict to the two existing models in the Tx-LLM [12] family, Tx-LLM M and Tx-LLM S, built over PaLM-2 on TDC tasks. TxGemma-27B-Predict surpasses Tx-LLM M on 45 out of 66 tasks, while underperforming on 21. In addition, it outperforms Tx-LLM S on 62 and underperforms Tx-LLM S on only 4. Aggregating performance over task, we observe a statistically significant improvement of TxGemma-27B-Predict over Tx-LLM models $(p = 0.003$ , Wilcoxon signed-rank test). 
These results demonstrate that TxGemma provides a highly competitive alternative to its predecessor with improved functionality at a substantially reduced model size. + +TxGemma is competitive with specialist therapeutic models Figure 4 and Figure S.4 compare TxGemma's performance with best-in-class specialist model across tasks containing various combinations of SMILES, amino acid, nucleotide, and text inputs. In a comparison with specialist best-in-class models, TxGemma-27B-Predict outperforms the state-of-the-art (SOTA) on 26 and performs near SOTA on 50. This is a substantial improvement over its predecessor Tx-LLM M, which outperformed SOTA on 22 tasks and near SOTA on 43. These results demonstrate the improved capabilities of TxGemma-27B-Predict and its competitiveness with current specialist models designed for specific tasks and therapeutic feature types. + +![](images/e02f5ed44e79ec81692d41c9c6fe4b312e200e4239d7e952545b07549038790e.jpg) +Figure 3 | Comparison of TxGemma-Predict's performance with therapeutic generalist models. (top) relative performance improvement of TxGemma-27B-Predict in comparison to Tx-LLM S. TxGemma-27B-Predict outperforms Tx-LLM S on 62 and underperforms on only 4. (bottom) relative performance improvement of TxGemma-27B-Predict in comparison to Tx-LLM M. TxGemma-27B-Predict outperforms Tx-LLM M on 45 out of 66 tasks, while underperforming on 21. When aggregating performance over task, we observe a net improvement of TxGemma-27B-Predict over Tx-LLM models, with a statistically significant difference $(p = 0.003$ , Wilcoxon signed-rank test). These results establish TxGemma-27B-Predict as a competitive and functionally enhanced alternative at practical model sizes. Values for each task can be found in Tables S.15 and S.16. 
+ +TxGemma performs similarly to multi-task models specialized for small molecules Table 1 and Figure S.6 compare the predictive performance of TxGemma-27B-Predict with MolE, a graph-based multi-task foundation model for small molecules. MolE performs within the $95\%$ CIs of TxGemma-27B-Predict for 15 out of 22 tasks. Furthermore, both TxGemma-27B-Predict and TxGemma-9B-Predict outperform LlaSMolMistral (7B), the top performing model from the LlaSMol suite, on 2 of 5 shared tasks and within $95\%$ CIs on 2 additional tasks (Table 2 and Figure S.5). All metrics for MolE and LlaSMol are reported from Mendez-Lucio et al. [24] and Yu et al. [23]. Given their specialization in small-molecule tasks, LlaSMol and MolE provide strong baselines for evaluating generalist models. Notably, TxGemma, a generalist model encompassing diverse drug types and many different tasks, achieves competitive performance with these dedicated models designed for a narrower range of small-molecule tasks. + +# 3.2 TxGemma Conversational Capabilities + +While TxGemma-27B-Predict performs well on prediction tasks, training solely on instruction tuning data for therapeutic properties limits its conversational capacity. TxGemma-27B-Predict can engage in general + +![](images/ebfa122c4e8e4aaa8d9e1067f57d4762781618f9e07826cee01379d3677e3b2b.jpg) + +![](images/9b152fe3c52d378f2d027185f60d5bd699d4d295bd539075317ae94dfc9ac7a1.jpg) + +![](images/2c3dedc41b7c9e61f949673cd15587fced5ab7c9ed1cd38b27f8cc2f630441e6.jpg) + +![](images/1996f43f70147e885c9ca69f311159a6643604936abffe7afc9aded5bd4946cc.jpg) + +![](images/3cd0a375ee3d7de3a86068d0c489eab4f7df74f4ad4e1134db7d492a2c938c19.jpg) +SMILES + +![](images/5019a41ee3d7b9b09cfa6b183c6cca835c79d3fc3f8b7e470d8b3d39c395d574.jpg) +SMILES + Text + +![](images/16baf39adc18c0e4e4aa59b45fb1b03adc1282067ff3e5c3d12ac88c14a15786.jpg) +Figure 4 | Comparison of TxGemma's performance with best-in-class specialist models. 
TxGemma-27B-Predict is evaluated on each task in TDC and compared to the corresponding best-in-class competitor. The panels depict different metrics used to evaluate the tasks. Tasks are colored by their feature types including one or a combination of SMILES, Amino acid, Nucleotide and text as indicated in the legend. Marker sizes illustrate the number of data points in the task on a log scale. The larger shaded area in blue indicates where TxGemma outperforms best-in-class models, while the narrower light blue shaded area indicates where TxGemma is performing near best-in-class model (defined as within $10\%$ ). MAE and MSE values are log-transformed since the magnitudes of these values depend on the units of outputs. Generation accuracy is the fraction of correct SMILES strings in the USPTO generation task. Values for each task can also be found in Tables S.13 and S.14. + +![](images/8b46427efa8d29aead6de741c144673fa93c545121e1a5c5fd8f01be42910034.jpg) +Amino acid +Nucleotide + Amino acid +Amino acid + Text +Amino acid + SMILES +Nucleotide + +conversation, but its performance deteriorates when prompts deviate from the expected format. Figure S.9 shows an example of such decline in TxGemma-27B-Predict's conversational capabilities. To expand the TxGemma family's capabilities and provide a more versatile tool with the ability to explain its reasoning, we trained TxGemma-Chat with a mix of therapeutic and general instruction-tuning data as detailed in Section 2.2. We evaluate these new conversational capabilities through a combination of standard LLM benchmarks and qualitative examples. We also run our models through assurance evaluations, as done for Gemma-3 [25], to verify that TxGemma models adhere to safety policies. + +Table 1 | Comparative performance of TxGemma and MolE on small molecule tasks. 
Details of the predictive performance of TxGemma-27B-Predict and MolE, a graph-based molecular multi-task foundation model, across various pharmacokinetics and toxicity tasks. Bold values indicate the best performance for each task. Metrics for MolE are reported from Mendez-Lucio et al. [24]. TxGemma-27B-Predict values are bootstrapped averages and $95\%$ CIs. These pharmacokinetics and toxicity tasks are publicly available in TDC [7]. + +
Task TypeTaskMetricMolE [24]TxGemma-27B-Predict
PharmacokineticsCaco2 WangMAE (↓)0.3290.401 (0.358-0.449)
Lipophilicity AstraZenecaMAE (↓)0.4060.538 (0.507-0.570)
Solubility AqSolDBMAE (↓)0.7760.907 (0.870-0.948)
PPBR AZMAE (↓)7.2299.048 (8.141-10.111)
HIA HouAUROC (↑)0.9840.988 (0.972-0.999)
Pgp BroccatelliAUROC (↑)0.9300.937 (0.904-0.964)
Bioavailability MaAUROC (↑)0.6400.694 (0.575-0.801)
BBB MartinsAUROC (↑)0.9030.908 (0.872-0.938)
CYP3A4 Substrate CarbonMangelsAUROC (↑)0.6920.691 (0.601-0.784)
CYP2D6 VeithAUPRC (↑)0.6790.683 (0.639-0.726)
CYP3A4 VeithAUPRC (↑)0.8760.854 (0.836-0.872)
CYP2C9 VeithAUPRC (↑)0.7820.798 (0.767-0.826)
CYP2D6 Substrate CarbonMangelsAUPRC (↑)0.6920.711 (0.570-0.830)
CYP2C9 Substrate CarbonMangelsAUPRC (↑)0.4090.438 (0.302-0.576)
VDss LombardoSpearman (↑)0.6440.559 (0.457-0.655)
Half Life ObachSpearman (↑)0.5780.458 (0.306-0.594)
Clearance Microsome AZSpearman (↑)0.6320.462 (0.353-0.565)
Clearance Hepatocyte AZSpearman (↑)0.4560.260 (0.129-0.384)
ToxicityLD50 ZhuMAE (↓)0.6020.627 (0.597-0.660)
hERGAUROC (↑)0.8350.885 (0.813-0.946)
AMESAUROC (↑)0.8340.816 (0.795-0.838)
DILIAUROC (↑)0.8520.886 (0.810-0.947)
+ +Table 2 | Comparative performance of TxGemma and LlaSMol on small molecule tasks. Comparison of TxGemma-27B-Predict with LlaSMolMistral (best LlaSMol model at 7B) across shared small-molecule tasks. Bold values indicate the best performance for each task. Metrics for LlaSMolMistral are reported from Yu et al. [23]. TxGemma-Predict values are bootstrapped averages and $95\%$ CIs. These pharmacokinetics, toxicity, and high-throughput screening data and tasks are publicly available in TDC [7] + +
Task TypeTaskMetricLlaSMolMistral [23]TxGemma-27B-PredictTxGemma-9B-Predict
PharmacokineticsBBBP†Accuracy (↑)0.7460.869 (0.835-0.901)0.847 (0.813-0.881)
ESOL†RMSE (↓)1.1501.250 (1.185-1.321)1.360 (1.246-1.480)
Lipo†RMSE (↓)1.0100.710 (0.668-0.752)0.742 (0.700-0.787)
ToxicityClintoxAccuracy (↑)0.9310.926 (0.896-0.956)0.925 (0.892-0.953)
High-throughput screeningHIV*Accuracy (↑)0.9670.968 (0.964-0.972)0.965 (0.961-0.969)
+ +* To predict whether compounds have anti-HIV properties. +† Task name is modified to match the nomenclature from Yu et al. [23]. + +TxGemma-Chat bridges the gap between property predictors and general language models To assess the performance of TxGemma-Chat as a general conversational LLM, we evaluated it on the Massive Multitask Language Understanding (MMLU) [26] benchmark, a comprehensive suite of 57 diverse tasks spanning mathematics, history, computer science, law, etc. This benchmark evaluates knowledge, reasoning, + +![](images/cff609f1cff46876c76c043fcbdff25eab187a12d98a2bd260d1ab40fc7d3e3e.jpg) +Figure 5 | TxGemma-Chat bridges the gap between property predictors and general LLMs. Each point represents a therapeutic task in the TDC. The figure depicts relative predictive performance changes of TxGemma-Chat in comparison to TxGemma-Predict (top) and Gemma-2 (bottom) for 9B variants left and 27B variants in right. As expected, TxGemma-27B-Predict outperforms TxGemma-27B-Chat on therapeutic tasks, with TxGemma-27B-Chat showing a $10.69\%$ median relative performance reduction. However, TxGemma-27B-Chat exceeds the Gemma-2-27B baseline by $29.67\%$ on TDC therapeutic tasks. Similarly, TxGemma-9B-Chat's performance is $10.32\%$ lower than TxGemma-9B-Predict's. Values for each task can be found in Tables S.15 and S.16. + +![](images/29542d68bc684f7ee0c7db74899e9f4990c401e026299fb8377fbbe2aa9ea653.jpg) + +and problem-solving abilities across a wide range of academic subjects, providing a measure of overall language understanding. It comprises 14,079 multiple-choice questions, each with four possible answers. For this multiple-choice format, we took the model's prediction as the option with the highest log-likelihood in a zero-shot setting and report overall accuracy as well as per-subject accuracy. + +Figure S.7 compares the performance of TxGemma-27B-Chat, TxGemma-27B-Predict, and Gemma-2-27B on MMLU, a standard benchmark for evaluating general LLMs. 
TxGemma-27B-Chat achieves an accuracy of $73.87\%$ , slightly lower than Gemma-2-27B's $75.38\%$ , but TxGemma-27B-Chat shows slight improvements in areas such as medical genetics, high school statistics, and college chemistry. Furthermore, TxGemma-27B-Chat significantly outperforms TxGemma-27B-Predict, which has an accuracy of $53.60\%$ . This suggests that while fine-tuning solely on therapeutic data can diminish general knowledge acquired during pre-training, incorporating general instruction-tuning data can mitigate this effect. + +Furthermore, we assess TxGemma-27B-Chat on all therapeutic tasks within TDC. Figure 5 compares the relative performance changes of TxGemma-27B-Chat to TxGemma-27B-Predict and Gemma-2-27B for both 9B and 27B variants across these tasks. As anticipated, TxGemma-27B-Predict outperforms TxGemma-27B-Chat on these predictive tasks, with a median relative performance reduction of $11\%$ observed for TxGemma-27B-Chat. Nevertheless, TxGemma-27B-Chat surpasses the baseline Gemma-2-27B, demonstrating a median relative improvement of $30\%$ . Similarly, TxGemma-9B-Chat shows a $10\%$ median relative performance reduction compared to TxGemma-9B-Predict. Regression tasks experience the greatest performance decline from the general-purpose training. These results demonstrate how TxGemma-Chat bridges the gap between therapeutic property predictors and general LLMs, functioning as a unified model for both capabilities. + +TxGemma-Chat can provide reasoning for complex tasks. A particularly compelling application of conversational models lies in prompting them to explain their predictions to users. While general LLMs may possess some foundational knowledge concerning therapeutic challenges, they are not accurate for property prediction (Figure 5). In Figure 6, we prompt TxGemma-27B-Chat to answer a question regarding blood-brain barrier permeability using the BBB Martins prompt format. 
TxGemma-27B-Chat provides only the answer in the initial turn, but when given a subsequent prompt to articulate its rationale, the model provides mechanistic reasoning for its answer based on molecular solubility and the structure of the input molecule derived from the SMILES string. All of this reasoning occurred directly within the model weights, without requiring any preprocessing of the SMILES string. + +Interestingly, prompting structures enable TxGemma-Chat to provide additional reasoning on complex tasks. For instance, while the relationship between blood-brain barrier permeability and lipophilicity is intuitive, some + +Table 3 | Performance of Agentic-Tx. Accuracy of Agentic-Tx compared with SOTA models on ChemBench, GPQA, and HLE benchmarks. + +
ModelChemBenchGPQA (Diamond)Humanity's Last Exam
MiniPreferenceChemistryChemistry & Biology
Agentic-Tx (Gemini 2.5-Pro)84.566.281.720.1
Agentic-Tx (Gemini 2.0-Pro)83.465.562.414.5
Agentic-Tx (Gemini 1.5-Pro)80.665.051.811.9
Claude-3.5 (Sonnet)73.0*60.0*†40.4-
GPT-4o72.0*59.0*43.8**3.8
Gemini 2.5-pro82.865.579.517.9
Gemini 2.0-pro79.658.453.311.1
Gemini 1.5-pro74.955.648.210.6
PaperQA2 [28]67.0*56.0*--
o180.0*56.0*64.7**12.3
o3-mini (medium)82.461.362.513.0
o3-mini (high)82.562.064.513.2
Human Expert (Average Performance)27.0---
+ +$(\dagger)$ Using ReAct framework, $(^{*})$ Extracted from [1], $(^{**})$ Extracted from [2] + +tasks such as predicting clinical trial approval are more challenging to reason over. If TxGemma-27B-Chat is prompted to provide reasoning in the same manner as in Figure 6 for predicting clinical trial approval, TxGemma-27B-Chat refuses and directs the user to alternative sources. However, when modifying the original prompt, instructing the model to output reasoning steps before the final answer, it bypasses the refusal and restores reasoning capabilities (Figure S.10). + +# 3.3 Agentic Planning and Execution based on TxGemma + +Agentic-Tx demonstrates competitive performance on therapeutic benchmarks. We evaluate the capability of Agentic-Tx to assist with therapeutics tasks by means of questions from three benchmarks: GPQA (Diamond) [27], ChemBench [1], and Humanity's Last Exam (HLE) [15]. Within each benchmark, we use existing selections of therapeutic-relevant questions; for GPQA we evaluate GPQA-Chemistry (47 questions), for ChemBench we evaluate ChemBench-Chemical Preference which aims to select an ideal candidate molecule for therapeutic development (1,001 questions) and ChemBench-mini, which evaluates across 8 categories of chemistry from toxicity/safety to organic chemistry (236 questions). Finally, for HLE, we evaluate HLE-Chemistry and HLE-Biology (235 questions). For open-ended questions in HLE, we observed a high variation of metric scores depending on the selection of the LLM-rater model [15]. To ensure an objective accuracy measure, we restrict the evaluation to multiple choice questions (MCQs). + +As shown in Table 3, Agentic-Tx (Gemini 2.5-Pro), Agentic-Tx (Gemini 2.0-Pro), and Agentic-Tx (Gemini 1.5-Pro) achieve competitive or greater accuracy compared to existing SOTA models across several benchmarks. 
Specifically, Agentic-Tx (Gemini 2.5-Pro) and Agentic-Tx (Gemini 2.0-Pro) surpass prior SOTA models on the exceptionally difficult Humanity's Last Exam benchmark (Chemistry & Biology tasks), with Agentic-Tx (Gemini 2.5-Pro) achieving $52.3\%$ relative improvement over o3-mini (high) and $13.4\%$ over Gemini 2.5-pro, as well as on ChemBench, with relative improvements of $6.3\%$ (ChemBench-Preference) and $2.4\%$ (ChemBench-Mini) over o3-mini (high) and $1.1\%$ (ChemBench-Preference) and $2.0\%$ (ChemBench-Mini) over Gemini 2.5-pro. On GPQA (Diamond), Agentic-Tx also achieves SOTA accuracy with $26.7\%$ relative improvements over o3-mini and $2.7\%$ over Gemini 2.5-pro. All variants of Agentic-Tx outperform their corresponding base Gemini models across all benchmarks, indicating the effectiveness of the Agentic-Tx framework in enhancing LLMs' efficacy for advanced reasoning within this domain. This suggests that agentic workflows such as ours represent useful tools for therapeutic development, particularly in areas requiring domain knowledge and the selection of candidate molecules. The agent's ability to leverage external tools and perform multi-step + +Instructions: Answer the following question about drug properties. + +Context: As a membrane separating circulating blood and brain extracellular fluid, the blood-brain barrier (BBB) is the protection layer that blocks most foreign drugs. Thus the ability of a drug to penetrate the barrier to deliver to the site of action forms a crucial challenge in development of drugs for central nervous system. + +Question: Given a drug SMILES string, predict whether it + +(A) does not cross the BBB (B) crosses the BBB + +Drug SMILES: C1CN(CCC1(C=O)NC(CCO)C2=CC=C(C=C2)Cl)N)C3=NC=NC4=C3C=CN44 + +# TxGemma + +Answer: (B) + +Explain your reasoning based on the molecule structure. 
+ +# TxGemma + +The molecule has several properties that suggest it can cross the blood-brain barrier (BBB): + +Lipophilicity: The molecule contains multiple aromatic rings and a long alkyl chain, contributing to its lipophilicity. Lipophilicity is a key factor in BBB penetration, as the barrier is primarily composed of lipids. + +Molecular weight: The molecular weight is within a range (around 400 Da) that is generally considered favorable for BBB permeability. + +Hydrogen bond donors and acceptors: While there are hydrogen bond donors and acceptors present, they are not excessive, which can sometimes hinder BBB passage. + +Lack of charged groups: The molecule lacks any charged groups at physiological pH, making it less likely to be repelled by the BBB's tight junctions. + +Overall, the combination of lipophilicity, moderate molecular weight, balanced hydrogen bonding capacity, and lack of charged groups suggests that this molecule has a good chance of crossing the blood-brain barrier. + +What makes passing through the blood-brain barrier different from passive diffusion through a membrane? + +# TxGemma + +While passive diffusion is a major mechanism for molecules to cross the blood-brain barrier (BBB), there are key differences that make BBB passage more challenging and selective: + +1. Tight Junctions: The cells forming the BBB are packed very tightly together with specialized structures called tight junctions. These junctions seal the spaces between cells, significantly reducing the gaps through which molecules can passively diffuse. +2. Specialized Transport Systems: The BBB has a variety of specialized transport systems that actively move specific molecules across the barrier. These systems are highly selective and often require molecules to have specific structural features or be recognized by specific receptors. +3. Efflux Pumps: The BBB also expresses efflux pumps, which are proteins that actively pump molecules back out of the brain. 
This mechanism helps to prevent potentially harmful substances from entering the brain and maintain a stable brain environment. +4. Metabolic Enzymes: The BBB contains enzymes that can metabolize and inactivate certain molecules, further limiting their entry into the brain. +5. Astrocyte Endfeet: Astrocytes, a type of glial cell, have projections called endfeet that surround blood vessels in the brain. These endfeet contribute to the BBB's barrier function and regulate the passage of molecules. + +In summary, while passive diffusion plays a role in BBB permeability, the presence of tight junctions, specialized transport systems, efflux pumps, metabolic enzymes, and astrocyte endfeet make the BBB a highly selective barrier that restricts the entry of many molecules. This selectivity is crucial for protecting the brain from harmful substances and maintaining a stable internal environment, but it also poses challenges for drug delivery to the central nervous system. + +Is my molecule likely to run into these issues? + +# TxGemma + +Let's analyze how your molecule might be affected by the specific challenges of the BBB ... + +Figure 6 | TxGemma-Chat can be prompted to explain its reasoning or provide additional context. Following an initial question, TxGemma-27B-Chat is asked to explain its reasoning based on molecule structure in the second turn. The model uses its understanding of chemistry and biology to justify its answer and can continually engage with the user on follow-up questions. + +reasoning enables it to address more complex queries beyond the scope of traditional LLMs. + +Agentic-Tx effectively leverages various tools based on the therapeutic task requirement. In Figure S.14, we investigate tool usage frequency within the Agentic-Tx system across the ChemBench-Preference and Biology and Chemistry (B&C) HLE datasets. Our analysis reveals that Agentic-Tx tool usage distribution varies significantly depending on the task and dataset. 
For the ChemBench-Preference task, which focuses on selecting ideal candidate molecules for therapeutic development, the Agentic-Tx system exhibits a high frequency of usage for tools such as SMILES description and toxicity prediction. This suggests a strong emphasis on molecular characterization and safety assessment in this task correctly invoked by Agentic-Tx. In contrast, on the B&C HLE dataset, tool usage is predominantly concentrated on general knowledge retrieval tools like PubMed or Wikipedia search. This indicates that the Agentic-Tx system relies heavily on accessing and synthesizing broad biological or chemical knowledge to address questions in these domains. In Figure S.15, we investigate the breakdown of tool interactions per question and explore how these interactions contribute to performance variations. Our analysis shows that each question can involve up to 8 tool calls, and the high usage of tools such as SMILES description and toxicity prediction tools correlates with overall performance improvement. These results highlight the Agentic-Tx system's adaptive nature, demonstrating its ability to leverage different tools based on the specific requirements of the task. + +Agentic-Tx inference time is suitable for real time human interaction Analysis of Agentic-Tx's inference time indicates efficient performance characteristics. The median time observed for tool execution is 0.55 seconds. The fastest tool (Gene Sequence) completes execution in 0.15 seconds, while the slowest (ToxCast) requires 28.2 seconds. This suggests that Agentic-Tx operates within a timeframe conducive to real-time user interaction. The observed latencies demonstrate suitability for integration into workflows where immediate feedback and responsiveness are desired. The system's ability to maintain a median inference time below one second contributes to an efficient user experience. 
+ +# 3.4 Additional Analysis and Ablations + +Data contamination analysis and data leakage considerations To assess potential data contamination from the Gemma-2 pretraining data, we calculated the overlap between features in the therapeutic instruction-tuning data and the pretraining corpus. For multi-instance tasks, contamination was defined as the presence of any constituent feature (e.g., drug SMILES or target protein sequence in drug-target binding) in the pretraining data. The majority of tasks showed no direct contamination (Figure S.12). For tasks with some contamination, filtering contaminated datapoints and recalculating TxGemma-27B-Predict performance revealed no significant changes (Figure S.13). + +While direct contamination was minimal, we further investigated potential indirect contamination. Although SMILES strings are less common in general web text, pretraining on molecular names could have created learned associations between names and SMILES, potentially influencing test set performance. To test this, we compared the similarity of TxGemma-27B-Predict embeddings for PubChem molecules represented as SMILES strings and their corresponding IUPAC names, against the similarity of embeddings for SMILES strings paired with decoy (randomly selected, incorrect) names. The similarities were statistically equivalent (Figure S.12), confirmed by a two one-sided t-test $(p = 3 \times 10^{-12}$ , $\delta = 0.02)$ . This suggests that TxGemma-27B-Predict did not learn spurious name-SMILES associations during pretraining, likely because names and SMILES were encountered in separate training phases and for different molecules. Therefore, both direct and indirect contamination from pretraining are unlikely to significantly affect our results. + +Fine-tuning TxGemma models improves data efficiency. 
Given the scarcity of therapeutic data and the potential of TxGemma to serve as a pretrained model for further adaptation, we investigated TxGemma's data efficiency and generalization to new tasks in out-of-distribution settings. Specifically, we fine-tuned the baseline model Gemma-2-27B as well as our TxGemma-27B-Predict on adverse event prediction data from TrialBench [29]. Serious adverse events are critical in assessing the safety profile of a new treatment and accurate prediction of these events allows for better risk management and resource allocation [29]. To ensure a fair evaluation of generalization, we filtered the TrialBench test set to exclude samples overlapping with phase 1, 2, or 3 of clinical trial outcome prediction data in TDC. In addition, datapoints without available SMILES strings are excluded. This led to 14,368 train and 3,184 test samples. + +Figure 7 | TxGemma improves efficiency at adverse event prediction from SMILES strings. The figure shows the AUROC of predicting adverse events in a clinical trial from the drug SMILES strings as a function of the training data fraction for Gemma-2-27B and TxGemma-27B-Predict. Clinical trials are separated based on trial phase, and datapoints without available SMILES strings are excluded. To assess model performance with additional textual information, separate models trained on both SMILES strings and additional textual information are indicated by colored dashed lines, and SOTA models are indicated by gray dashed lines. (S) denotes models trained with SMILES strings only, and $(\mathrm{S} + \mathrm{T})$ those trained with SMILES and textual information (Table S.10). 
+![](images/4dfa44b4877c0dff07f7e32085f8af1e800f4ac8bc4d1d5740a2f5789b0e102d.jpg) +Gemma-27B (S) $\rightarrow$ TxGemma-27B-Predict (S) --- Gemma-27B (S+T) —— TxGemma-27B-Predict (S+T) --- Best-in-class (S+T) + +![](images/868848d097319f43df2f2324861cfe2384f376dcae1a95d6563e3f7b212d5ba8.jpg) + +![](images/6eb51e87119992d386790213cb24f8ca1436c165f652aa4c124e908de08b04eb.jpg) + +![](images/0ab979193ce5715dcc7b650b9114e63d4a217e3ebcd07ca293810378aa73bd12.jpg) + +We consider two settings. Initially, we focus exclusively on drug SMILES strings as the only feature contributing to clinical trial outcome, thereby isolating the influence of therapeutic information by excluding this additional context. To simulate data limitations, we fine-tuned TxGemma-27B-Predict and the baseline Gemma-2-27B on varying fractions of the training data, and then evaluated the newly fine-tuned models performance on the test set after 30 epochs of training (Figure 7). Overall, TxGemma-27B-Predict achieved higher AUROCs with lower amounts of training data, matching the performance of Gemma-2-27B with less than $10\%$ of retraining data. In the second setting, we explored the performance ceiling by incorporating textual information about the clinical trials, increasing the number of tokens provided to the models by a factor of 4 (Table S.10). This is the setting used by the best-in-class model for adverse event prediction [29]. The addition of textual information allowed our models to consistently outperform existing SOTA methods [29]. However, the performance difference between TxGemma-27B-Predict and Gemma-2-27B decreased in this scenario because the additional textual information diluted the relative importance of the drug SMILES strings. + +TxGemma inference time is suitable for virtual screening In Figure S.11, we plot the inference speed of TxGemma models of all sizes normalized by the number of TPUv5e chips used for serving. 
All model sizes are suitably fast for virtual screening, as even the largest 27B model is able to run inference on around 9,000 samples per day per TPU chip. Using 64 chips for serving, this would yield around 600,000 samples per day for the 27B model, and the smallest 2B model would reach 3,000,000 samples per day. + +Correlation between clinical trial approval and toxicity predictions We investigated the correlation between TxGemma's clinical trial approval predictions (based on SMILES and target disease) and its toxicity predictions (using TDC's AMES, DILI, and hERG tasks). Figure S.18 shows a consistent, but weak (0.15-0.35), positive Spearman correlation across all phases. This suggests TxGemma associates lower predicted toxicity with approval, but may also consider other factors such as efficacy or drug-likeness. + +Impact of feature types Figure S.16 presents a performance breakdown of TxGemma-27B-Predict by feature type, compared to Tx-LLM M. In both models, tasks incorporating both SMILES strings and textual features (e.g., disease names, cell line names/description) show the most significant improvement over SOTA. This suggests that the contextual knowledge acquired during LLM pretraining could aid in synthesizing textual information with molecular representations. + +Model size and domain fine-tuning ablations Figure S.17 compares the performance of TxGemma-Predict models across different sizes (2B, 9B, and 27B) on TDC tasks. Pairwise comparisons using the Wilcoxon
All TxGemma models significantly outperform their Gemma-2 counterparts $(p < 10^{-10}$ , Wilcoxon signed-rank test), underscoring the importance of specialized training for therapeutic tasks. + +# 4 Related work + +Task-specific models for chemistry and therapeutics. In recent years, there has been a surge in the development of deep learning models designed for various chemistry applications. Amongst those, graph neural networks (GNNs) have been applied for a wide variety of molecular prediction or generation tasks because small molecules are naturally represented as graphs [30, 31, 32, 33, 34, 35, 36, 37, 24]. Another common representation for small molecules is molecular fingerprints [38], which are binary vectors that capture the local environment of each atom [30, 39, 40]. + +TxGNN trained a GNN on medical knowledge graphs in order to perform zero-shot drug repurposing for diseases with limited treatment options [41]. AlphaFold and its successors have also significantly advanced the field of protein structure prediction and protein design [42, 43, 44, 45, 46]. These models have been influential for both mechanistic research and the development of structure-based drugs [47]. + +Large language models for biology and chemistry. Transformer-based models [48] have fueled the development of LLMs, which are trained on massive textual datasets with subsequent instruction-tuning [49] or alignment [50]. LLMs have demonstrated exceptional proficiency in various tasks, including text summarization, translation, and question answering [16, 51, 52]. Their ability to encode vast amounts of information and generalize to new tasks has sparked considerable interest in their potential applications across diverse domains. + +There has been increasing interest in applying the development for LLMs to scientific research. BrainGPT fine-tuned LLMs on neuroscience literature and found greater performance than domain experts [53]. 
LlaSMol fine-tuned LLMs on small molecule datasets and achieved near-SOTA performance on multiple tasks [23]. CLAMP used separate modules for natural language and molecular inputs, combining them together in a contrastive pre-training objective [54]. Protein language models [55, 56, 57, 58] and genomic language models [59, 60, 61] have used self-supervised pretraining to generate embeddings useful for downstream tasks. ProtLLM [62], BioT5 [63], and GraphToken [64] combine molecule or proteins with LLMs using textual or multi-modal strategies. Cellular foundation models such as scGPT [65], GenePT [66], Geneformer [67], Nicheformer [68], and Cell2Sentence [69] represent cells based on their gene expression to differentiate cell types and understand gene perturbations. NatureLM [70] trained a foundation model that represents small molecules, proteins, RNA, and materials as sequences over a wide variety of scientific tasks. + +Agentic Systems. Unlike traditional passive models, agentic systems proactively choose actions to achieve goals [71, 72, 73, 74, 75], involving planning [76, 77, 78, 79, 80] and interaction with external tools [81, 82, 83, 84]. LLMs have enabled such systems by processing complex information and generating action-driving responses. The ReAct framework [22] combines reasoning, action, and observation, with variations incorporating self-reflection [85] or model architectures for internal tool usage [82]. Agentic frameworks enable automating tasks like software development [73, 86, 87, 88] and scientific research [89, 90, 91] including biomedical applications such as nanobody design [90], drug discovery [92], or reaction optimization [93]. ChemCrow [92] is an agent designed to perform chemistry experiments in drug discovery and materials design. The coscientist by Boiko et al. 
[93] designs and performs chemical experiments by integrating web knowledge, code execution, and experiment automation, demonstrating successful reaction optimization of palladium-catalysed cross-couplings. The multi-agent system AI co-scientist [88] is designed for hypothesis generation over a variety of scientific fields. TxAgent was developed as an agentic framework that provides multi-step reasoning and tool use aimed towards therapeutic applications, processing clinical information to support tasks like treatment recommendation [94]. In contrast to recommending existing therapeutics, Agentic-Tx generally focuses on developing new therapeutics. + +# 5 Discussion + +TxGemma's performance suggests a paradigm shift in therapeutic AI development, demonstrating the viability of generalist LLMs. Despite the established dominance of specialist models in niche areas, TxGemma, a relatively lightweight and efficient generalist, achieves competitive results across a wide array of therapeutic tasks. This highlights the potential for broadly trained LLMs, such as those leveraging the comprehensive dataset Therapeutics Data Commons (TDC), to serve as powerful preliminary tools for hypothesis generation, information synthesis, and candidate prioritization. While specialist models would likely retain their value for complex, domain-specific challenges, future research should explore synergistic approaches that combine the strengths of both generalist and specialist therapeutic AI. + +A significant advancement with TxGemma-Chat is its ability to provide reasoning for its predictions, a first in therapeutic AI and a feature lost in TxGemma-Predict, likely due to "catastrophic forgetting" [95]. While explainability may introduce a small trade-off in raw predictive power, it provides a crucial window into the model's decision-making, a factor of paramount importance in therapeutic development. 
For instance, explaining blood-brain barrier permeability based on molecular structure provides valuable insights for medicinal chemists. Beyond its research applications, TxGemma-Chat holds a significant educational potential, enabling students and researchers to explore complex therapeutic concepts. At the same time, it is important to acknowledge that provided explanations are correlations, not necessarily causal, and must be interpreted with caution. The model's occasional inability to explain certain predictions reveals its knowledge boundaries. Future research should prioritize improving reliability and comprehensive explanations. Even with current limitations, TxGemma-Chat represents an important improvement over the "black-box" paradigm. + +Expanding beyond single-step predictions, Agentic-Tx demonstrates the potential for LLMs to orchestrate complex workflows. By integrating TxGemma with a suite of external tools (PubMed, Wikipedia, chemical databases, etc), Agentic-Tx can tackle multi-step reasoning tasks that would be difficult for a standalone LLM. Its strong performance on benchmarks like ChemBench Chemical Preference and Humanity's Last Exam (HLE) highlights the synergistic value of integrating domain-specific knowledge from TxGemma with general reasoning and information retrieval. This modular, tool-based design further ensures flexibility and extensibility, allowing for future integration of new tools and data. Importantly, it solves the issue of knowledge cut-off in LLMs by providing access to up-to-date information. Agentic-Tx with its autonomous and collaborative operation is a powerful asset for augmenting researchers and advancing therapeutic development. + +The data efficiency of TxGemma is clearly demonstrated in fine-tuning experiments on TrialBench. It achieves robust performance on novel tasks with substantially less training data compared to baseline models, showcasing the benefits of pre-training on a broad and diverse dataset like TDC. 
This efficiency is particularly critical in therapeutic domains, where data is often proprietary and limited. Moreover, our finding that adding textual context, while improving overall results, can dilute the influence of molecular representations emphasizes the importance of balancing the benefits of additional information with strategic feature selection. + +Although our in-silico results across a diverse range of therapeutic tasks are highly encouraging, we acknowledge that TxGemma's performance has not yet been validated in real-world, wet-lab experiments. Prospective validation in these settings represents a crucial next step. However, a cornerstone of this work is our commitment to open model release. By making TxGemma readily accessible to the research community, we aim to facilitate its rigorous validation and adaptation. Researchers can tailor TxGemma to their specific datasets, encompassing tasks and distribution shifts beyond the scope of TDC. Given the predominantly proprietary nature of therapeutic data, we believe this collaborative, community-driven approach is essential for translating TxGemma into tangible therapeutic applications + +# 6 Conclusion + +In conclusion, this work introduced TxGemma, a suite of efficient, generalist LLMs designed to improve therapeutic development. By leveraging extensive therapeutic instruction-tuning datasets and building upon the foundation of Gemma-2, TxGemma achieves exceptional performance across a wide range of predictive and generative therapeutic tasks, surpassing or matching both generalist and specialist state-of-the-art models. Notably, TxGemma's conversational counterparts, a first in therapeutic AI, provide reasoning and explanations, + +moving beyond traditional black-box predictions to facilitate mechanistic understanding and scientific discourse. 
Furthermore, the integration of TxGemma into an agentic system, Agentic-Tx, demonstrates its capacity to solve complex, multi-step problems, achieving state-of-the-art results on challenging reasoning-intensive tasks. Finally, and critically, the open release of TxGemma empowers the research community and scientists to adapt and refine the models on their own private data, potentially leading to significant advancements in drug discovery and development. Through these contributions, TxGemma represents a meaningful step towards more efficient, transparent, and collaborative AI-driven therapeutic research. + +# Acknowledgments + +This project was a collaboration between teams at Google DeepMind and Google Research. We thank Marcus Brubaker, David Belanger, Justin Chen, and David Steiner for the feedback and insight which significantly contributed to the enhancement of this report. We thank Tris Warkentin, Glenn Cameron, Victor Cotruta, Fereshteh Mahvar, Tiffany Chen, Omar Sansevier, Kathleen Kenealy, Joe Fernandez, Gus Martins, Nabila Babar, Sara Smoot, Antonia Paterson, Pankil Botadra, Metin Toksoz-Exley, Tim Thelin, Can "John" Kirmizi, and Fayaz Jamil for their collaborative efforts in enabling the open model launch of TxGemma. We also thank Phoebe Kirk, Rachelle Sico, Yun Liu, Anand Rao, Jon Small, Juanita Bawagan, Jane Park, Jenn Sturgeon, Fred Alcober, Samantha Heyman, Abhinav Das for their valuable insights and technical support. We are also grateful to Zoubin Ghahramani, Raia Hadsell, Avinatan Hassidim, Katherine Chou, Dale Webster, Jon Shlens, and Pushmeet Kohli for their support during the course of this project. + +# Inclusion and ethics + +While AI offers transformative potential in drug discovery, ethical considerations and transparency remain crucial. Biases in training data can lead to inequities, highlighting the need for diverse datasets and explainable AI systems. 
Our model, while still in the research stage, highlights the continuous need for development and refinement in this field. We acknowledge the difficulty in explaining the inner workings of complex models, but remain dedicated to advancing research in this area. + +# Data availability + +The Therapeutics Data Commons (TDC) datasets used for developing, benchmarking, and evaluating TxGemma are publicly available on their website. The benchmarking datasets used in this study—Humanity's Last Exam (HLE), GPQA (Diamond), ChemBench, and TrialBench (Serious Adverse Event Prediction)—are all publicly available via their respective websites. + +# Code availability + +All of the components used in this work are available publicly. For reproducibility, we have documented technical methods and data curation detail in depth, while keeping the paper accessible to clinical and general scientific audiences. Specifically, all the data needs to reproduce this work is publicly accessible to the community. TxGemma, a collection of lightweight state-of-the-art, open language models, are provided for researchers in three model size of 2B, 9B, and 27B and is accessible through Vertex AI Model Garden and Hugging Face. TxGemma's Github repository including supporting code and colab notebooks for quick start are also available at: https://github.com/google-gemini/gemma-cookbook/tree/main/TxGemma. We have specifically provided starter colabs for inference, fine-tuning, and exploring agentic capabilities. TxGemma remains a research model and requires refinement. We look forward to working with research partners, regulators, and providers to validate and explore safe onward uses of TxGemma. + +# Author Contributions + +E.W., S.S., and S.A. made substantial contributions to the conception, design, and evaluation of this work. They played a key role in data analysis, interpretation of results, and the drafting and revision of the manuscript. P.F.J. 
contributed to drafting and revision of the manuscript. F.Z. contributed to the data processing and model training in the manuscript. R.P. contributed to obtaining necessary legal approvals, + +and organizational support. All authors participated in critically reviewing and revising the manuscript and interpreting the data and findings. + +# Competing interests + +This study was funded by Alphabet Inc and/or a subsidiary thereof ('Alphabet'). E.W., S.S., P.F.J., F.Z., R.P., Y.M., J.B., D.F., and S.A. are employees of Alphabet and may own stock as part of the standard compensation package. + +# References + +1. Mirza, A., Alampara, N., Kunchapu, S., Rios-Garcia, M., Emoekabu, B., Krishnan, A., Gupta, T., Schilling-Wilhelmi, M., Okereke, M., Aneesh, A., et al. Are large language models superhuman chemists? arXiv preprint arXiv:2404.01475 (2024). +2. OpenAI. Learning to Reason with LLMs https://openai.com/index/learning-to-reason-with-llms/. Accessed: Wednesday 9th April, 2025. 2024. +3. Sun, D., Gao, W., Hu, H. & Zhou, S. Why $90\%$ of clinical drug development fails and how to improve it? Acta Pharmaceutica Sinica B 12, 3049-3062 (2022). +4. Hinkson, I. V., Madej, B. & Stahlberg, E. A. Accelerating therapeutics for opportunities in medicine: a paradigm shift in drug discovery. Frontiers in pharmacology 11, 770 (2020). +5. Kumar, A., Voet, A. & Zhang, K. Y. Fragment based drug design: from experimental to computational approaches. *Current medicinal chemistry* 19, 5128-5147 (2012). +6. Velez-Arce, A., Huang, K., Li, M. M., Lin, X., Gao, W., Fu, T., Kellis, M., Pentelute, B. L. & Zitnik, M. TDC-2: Multimodal foundation for therapeutic science. bioRxiv, 2024-06 (2024). +7. Huang, K., Fu, T., Gao, W., Zhao, Y., Roohani, Y., Leskovec, J., Coley, C. W., Xiao, C., Sun, J. & Zitnik, M. Therapeutics data commons: Machine learning datasets and tasks for drug discovery and development. arXiv preprint arXiv:2102.09548 (2021). +8. 
Huang, K., Fu, T., Gao, W., Zhao, Y., Roohani, Y., Leskovec, J., Coley, C. W., Xiao, C., Sun, J. & Zitnik, M. Artificial intelligence foundation for therapeutic science. Nature chemical biology 18, 1033-1036 (2022). +9. Bubeck, S., Chandrasekaran, V., Eldan, R., Gehrke, J., Horvitz, E., Kamar, E., Lee, P., Lee, Y. T., Li, Y., Lundberg, S., et al. Sparks of artificial general intelligence: Early experiments with GPT-4. arXiv preprint arXiv:2303.12712 (2023). +10. Taylor, R., Kardas, M., Cucurull, G., Scialom, T., Hartshorn, A., Saravia, E., Poulton, A., Kerkez, V. & Stojnic, R. Galactica: A large language model for science. arXiv preprint arXiv:2211.09085 (2022). +11. Telenti, A., Auli, M., Hie, B. L., Maher, C., Saria, S. & Ioannidis, J. P. Large language models for science and medicine. European journal of clinical investigation 54, e14183 (2024). +12. Chaves, J. M. Z., Wang, E., Tu, T., Vaishnav, E. D., Lee, B., Mahdavi, S. S., Semturs, C., Fleet, D., Natarajan, V. & Azizi, S. Tx-LLM: A Large Language Model for Therapeutics. arXiv preprint arXiv:2406.06316 (2024). +13. Team, G., Mesnard, T., Hardin, C., Dadashi, R., Bhupatiraju, S., Pathak, S., Sifre, L., Riviere, M., Kale, M. S., Love, J., et al. Gemma: Open models based on gemini research and technology. arXiv preprint arXiv:2403.08295 (2024). +14. Team, G., Riviere, M., Pathak, S., Sessa, P. G., Hardin, C., Bhupatiraju, S., Hussenot, L., Mesnard, T., Shahriari, B., Ramé, A., et al. Gemma 2: Improving open language models at a practical size. arXiv preprint arXiv:2408.00118 (2024). +15. Phan, L., Gatti, A., Han, Z., Li, N., Hu, J., Zhang, H., Shi, S., Choi, M., Chopra, A., et al. Humanity's Last Exam. arXiv preprint arXiv:2501.14249 (2025). +16. Brown, T., Mann, B., Ryder, N., Subbiah, M., Kaplan, J. D., Dhariwal, P., Neelakantan, A., Shyam, P., Sastry, G., Askell, A., et al. Language models are few-shot learners. Advances in neural information processing systems 33, 1877-1901 (2020). +17. 
Longpre, S., Hou, L., Vu, T., Webson, A., Chung, H. W., Tay, Y., Zhou, D., Le, Q. V., Zoph, B., Wei, J., et al. The FLAN collection: Designing data and methods for effective instruction tuning in International Conference on Machine Learning (2023), 22631-22648. +18. Team, G., Anil, R., Borgeaud, S., Alayrac, J.-B., Yu, J., Soricut, R., Schalkwyk, J., Dai, A. M., Hauth, A., Millican, K., et al. Gemini: a family of highly capable multimodal models. arXiv preprint arXiv:2312.11805 (2023). +19. Landrum, G. RDKit: Open-Source Cheminformatics Software. https://github.com/rdkit/rdkit/releases/tag/Release_2016_09_4 (2016). +20. Dalke, A. The chemfp project. Journal of cheminformatics 11, 1-21 (2019). +21. Sievers, F., Wilm, A., Dineen, D., Gibson, T. J., Karplus, K., Li, W., Lopez, R., McWilliam, H., Remmert, M., Söding, J., et al. Fast, scalable generation of high-quality protein multiple sequence alignments using Clustal Omega. Molecular systems biology 7, 539 (2011). +22. Yao, S., Zhao, J., Yu, D., Du, N., Shafran, I., Narasimhan, K. & Cao, Y. React: Synergizing reasoning and acting in language models. arXiv preprint arXiv:2210.03629 (2022). +23. Yu, B., Baker, F. N., Chen, Z., Ning, X. & Sun, H. Llasmol: Advancing large language models for chemistry with a large-scale, comprehensive, high-quality instruction tuning dataset. arXiv preprint arXiv:2402.09391 (2024). +24. Mendez-Lucio, O., Nicolaou, C. A. & Earnshaw, B. MolE: a foundation model for molecular graphs using disentangled attention. Nature Communications 15, 9431 (2024). +25. Team, G. Gemma 3 technical report. Google DeepMind (2025). +26. Hendrycks, D., Burns, C., Basart, S., Zou, A., Mazeika, M., Song, D. & Steinhardt, J. Measuring massive multitask language understanding. arXiv preprint arXiv:2009.03300 (2020). +27. Rein, D., Hou, B. L., Stickland, A. C., Petty, J., Pang, R. Y., Dirani, J., Michael, J. & Bowman, S. R. 
Gpqa: A graduate-level google-proof q@a benchmark in First Conference on Language Modeling (2024). +28. Skarlinski, M. D., Cox, S., Laurent, J. M., Braza, J. D., Hinks, M., Hammerling, M. J., Ponnapati, M., Rodriques, S. G. & White, A. D. Language agents achieve superhuman synthesis of scientific knowledge. arXiv preprint arXiv:2409.13740 (2024). +29. Chen, J., Hu, Y., Wang, Y., Lu, Y., Cao, X., Lin, M., Xu, H., Wu, J., Xiao, C., Sun, J., et al. TrialBench: Multi-modal artificial intelligence-ready clinical trial datasets. arXiv preprint arXiv:2407.00631 (2024). +30. Torng, W. & Altman, R. B. Graph convolutional neural networks for predicting drug-target interactions. Journal of chemical information and modeling 59, 4131-4149 (2019). + +31. Stärk, H., Ganea, O., Pattanaik, L., Barzilay, R. & Jaakkola, T. Equibind: Geometric deep learning for drug binding structure prediction in International conference on machine learning (2022), 20503-20521. +32. Xiong, Z., Wang, D., Liu, X., Zhong, F., Wan, X., Li, X., Li, Z., Luo, X., Chen, K., Jiang, H., et al. Pushing the boundaries of molecular representation for drug discovery with the graph attention mechanism. Journal of medicinal chemistry 63, 8749-8760 (2019). +33. Heid, E. & Green, W. H. Machine learning of reaction properties via learned representations of the condensed graph of reaction. Journal of Chemical Information and Modeling 62, 2101-2110 (2021). +34. Yang, K., Swanson, K., Jin, W., Coley, C., Eiden, P., Gao, H., Guzman-Perez, A., Hopper, T., Kelley, B., Mathea, M., et al. Analyzing learned molecular representations for property prediction. Journal of chemical information and modeling 59, 3370-3388 (2019). +35. Morrone, J. A., Weber, J. K., Huynh, T., Luo, H. & Cornell, W. D. Combining docking pose rank and structure with deep learning improves protein-ligand binding mode prediction over a baseline docking approach. Journal of chemical information and modeling 60, 4170-4179 (2020). +36. 
Mohr, B., Shmilovich, K., Kleinwächter, I. S., Schneider, D., Ferguson, A. L. & Bereau, T. Data-driven discovery of cardiolipin-selective small molecules by computational active learning. Chemical Science 13, 4498-4511 (2022). +37. Stokes, J. M., Yang, K., Swanson, K., Jin, W., Cubillos-Ruiz, A., Donghia, N. M., MacNair, C. R., French, S., Carfrae, L. A., Bloom-Ackermann, Z., et al. A deep learning approach to antibiotic discovery. Cell 180, 688-702 (2020). +38. Rogers, D. & Hahn, M. Extended-connectivity fingerprints. Journal of chemical information and modeling 50, 742-754 (2010). +39. Tayyebi, A., Alshami, A. S., Rabiei, Z., Yu, X., Ismail, N., Talukder, M. J. & Power, J. Prediction of organic compound aqueous solubility using machine learning: a comparison study of descriptor-based and fingerprints-based models. Journal of Cheminformatics 15, 99 (2023). +40. Belenahalli Shekarappa, S., Kandagalla, S. & Lee, J. Development of machine learning models based on molecular fingerprints for selection of small molecule inhibitors against JAK2 protein. Journal of Computational Chemistry 44, 1493-1504 (2023). +41. Huang, K., Chandak, P., Wang, Q., Havaldar, S., Vaid, A., Leskovec, J., Nadkarni, G. N., Glicksberg, B. S., Gehlenborg, N. & Zitnik, M. A foundation model for clinician-centered drug repurposing. Nature Medicine, 1-13 (2024). +42. Jumper, J., Evans, R., Pritzel, A., Green, T., Figurnov, M., Ronneberger, O., Tunyasuvunakool, K., Bates, R., Zidek, A., Potapenko, A., et al. Highly accurate protein structure prediction with AlphaFold. nature 596, 583-589 (2021). +43. Tunyasuvunakool, K., Adler, J., Wu, Z., Green, T., Zielinski, M., Žídek, A., Bridgland, A., Cowie, A., Meyer, C., Laydon, A., et al. Highly accurate protein structure prediction for the human proteome. Nature 596, 590-596 (2021). +44. Senior, A. W., Evans, R., Jumper, J., Kirkpatrick, J., Sifre, L., Green, T., Qin, C., Zidek, A., Nelson, A. W., Bridgland, A., et al. 
Improved protein structure prediction using potentials from deep learning. Nature 577, 706-710 (2020). +45. Abramson, J., Adler, J., Dunger, J., Evans, R., Green, T., Pritzel, A., Ronneberger, O., Willmore, L., Ballard, A. J., Bambrick, J., et al. Accurate structure prediction of biomolecular interactions with AlphaFold 3. Nature, 1-3 (2024). +46. Zambaldi, V., La, D., Chu, A. E., Patani, H., Danson, A. E., Kwan, T. O., Frerix, T., Schneider, R. G., Saxton, D., Thillaisundaram, A., et al. De novo design of high-affinity protein binders with AlphaProteo. arXiv preprint arXiv:2409.08022 (2024). +47. Ren, F., Ding, X., Zheng, M., Korzinkin, M., Cai, X., Zhu, W., Mantsyzov, A., Aliper, A., Aladinskiy, V., Cao, Z., et al. AlphaFold accelerates artificial intelligence powered drug discovery: efficient discovery of a novel CDK20 small molecule inhibitor. Chemical science 14, 1443-1452 (2023). +48. Vaswani, A. Attention is all you need. Advances in Neural Information Processing Systems (2017). +49. Zhang, S., Dong, L., Li, X., Zhang, S., Sun, X., Wang, S., Li, J., Hu, R., Zhang, T., Wu, F., et al. Instruction tuning for large language models: A survey. arXiv preprint arXiv:2308.10792 (2023). +50. Kaufmann, T., Weng, P., Bengs, V. & Hüllermeier, E. A survey of reinforcement learning from human feedback. arXiv preprint arXiv:2312.14925 (2023). +51. Liu, Y. & Lapata, M. Text summarization with pretrained encoders. arXiv preprint arXiv:1908.08345 (2019). +52. Kenton, J. D. M.-W. C. & Toutanova, L. K. BERT: Pre-training of deep bidirectional transformers for language understanding in Proceedings of naacL-HLT 1 (2019). +53. Luo, X., Rechardt, A., Sun, G., Nejad, K. K., Yáñez, F., Yilmaz, B., Lee, K., Cohen, A. O., Borghesani, V., Pashkov, A., et al. Large language models surpass human experts in predicting neuroscience results. Nature human behaviour, 1-11 (2024). +54. Seidl, P., Vall, A., Hochreiter, S. & Klambauer, G. 
Enhancing activity prediction models in drug discovery with the ability to understand human language in International Conference on Machine Learning (2023), 30458-30490. +55. Rives, A., Meier, J., Sercu, T., Goyal, S., Lin, Z., Liu, J., Guo, D., Ott, M., Zitnick, C. L., Ma, J., et al. Biological structure and function emerge from scaling unsupervised learning to 250 million protein sequences. Proceedings of the National Academy of Sciences 118, e2016239118 (2021). +56. Lin, Z., Akin, H., Rao, R., Hie, B., Zhu, Z., Lu, W., Smetanin, N., Verkuil, R., Kabeli, O., Shmueli, Y., et al. Evolutionary-scale prediction of atomic-level protein structure with a language model. Science 379, 1123-1130 (2023). +57. Alley, E. C., Khimulya, G., Biswas, S., AlQuraishi, M. & Church, G. M. Unified rational protein engineering with sequence-based deep representation learning. Nature methods 16, 1315-1322 (2019). +58. Ferruz, N., Schmidt, S. & Höcker, B. ProtGPT2 is a deep unsupervised language model for protein design. Nature communications 13, 4348 (2022). +59. Nguyen, E., Poli, M., Durrant, M. G., Kang, B., Katrekar, D., Li, D. B., Bartie, L. J., Thomas, A. W., King, S. H., Brixi, G., et al. Sequence modeling and design from molecular to genome scale with Evo. Science 386, eado9336 (2024). + +60. Dalla-Torre, H., Gonzalez, L., Mendoza-Revilla, J., Lopez Carranza, N., Grzywaczewski, A. H., Oteri, F., Dallago, C., Trop, E., de Almeida, B. P., Sirelkhatim, H., et al. Nucleotide Transformer: building and evaluating robust foundation models for human genomics. Nature Methods, 1-11 (2024). +61. Cornman, A., West-Roberts, J., Camargo, A. P., Roux, S., Beracochea, M., Mirdita, M., Ovchinnikov, S. & Hwang, Y. The OMG dataset: An Open MetaGenomic corpus for mixed-modality genomic language modeling. bioRxiv, 2024-08 (2024). +62. Zhuo, L., Chi, Z., Xu, M., Huang, H., Zheng, H., He, C., Mao, X.-L. & Zhang, W. Protllm: An interleaved protein-language llm with protein-as-word pre-training. 
arXiv preprint arXiv:2403.07920 (2024). +63. Pei, Q., Zhang, W., Zhu, J., Wu, K., Gao, K., Wu, L., Xia, Y. & Yan, R. Biot5: Enriching cross-modal integration in biology with chemical knowledge and natural language associations. arXiv preprint arXiv:2310.07276 (2023). +64. Anonymous. Parameter Efficient Graph Encoding for Large Language Models 2025. https://openreview.net/forum?id=RbcXV63ZJk. +65. Cui, H., Wang, C., Maan, H., Pang, K., Luo, F., Duan, N. & Wang, B. scGPT: toward building a foundation model for single-cell multi-omics using generative AI. Nature Methods, 1-11 (2024). +66. Chen, Y. & Zou, J. GenePT: a simple but effective foundation model for genes and cells built from ChatGPT. bioRxiv, 2023-10 (2024). +67. Theodoris, C. V., Xiao, L., Chopra, A., Chaffin, M. D., Al Sayed, Z. R., Hill, M. C., Mantineo, H., Brydon, E. M., Zeng, Z., Liu, X. S., et al. Transfer learning enables predictions in network biology. Nature 618, 616-624 (2023). +68. Schaar, A. C., Tejada-Lapuerta, A., Palla, G., Gutgesell, R., Halle, L., Minaeva, M., Vornholz, L., Dony, L., Drummer, F., Bahrami, M., et al. Nicheformer: a foundation model for single-cell and spatial omics. bioRxiv, 2024-04 (2024). +69. Levine, D., Rizvi, S. A., Lévy, S., Pallikkavaliyaveetil, N., Zhang, D., Chen, X., Ghadermarzi, S., Wu, R., Zheng, Z., Vrkic, I., et al. Cell2Sentence: teaching large language models the language of biology. BioRxiv, 2023-09 (2023). +70. Xia, Y., Jin, P., Xie, S., He, L., Cao, C., Luo, R., Liu, G., Wang, Y., Liu, Z., Chen, Y.-J., et al. NatureLM: Deciphering the Language of Nature for Scientific Discovery. arXiv preprint arXiv:2502.07527 (2025). +71. Wang, L., Ma, C., Feng, X., Zhang, Z., Yang, H., Zhang, J., Chen, Z., Tang, J., Chen, X., Lin, Y., et al. A survey on large language model based autonomous agents. Frontiers of Computer Science 18, 186345 (2024). +72. Shanahan, M., McDonell, K. & Reynolds, L. Role play with large language models. Nature 623, 493-498 (2023). +73. 
Qian, C., Cong, X., Yang, C., Chen, W., Su, Y., Xu, J., Liu, Z. & Sun, M. Communicative agents for software development. arXiv preprint arXiv:2307.07924 6 (2023). +74. Hong, S., Zheng, X., Chen, J., Cheng, Y., Wang, J., Zhang, C., Wang, Z., Yau, S. K. S., Lin, Z., Zhou, L., et al. Metagpt: Meta programming for multi-agent collaborative framework. arXiv preprint arXiv:2308.00352 (2023). +75. Talebirad, Y. & Nadiri, A. Multi-agent collaboration: Harnessing the power of intelligent llm agents. arXiv preprint arXiv:2306.03314 (2023). +76. Hao, S., Gu, Y., Ma, H., Hong, J. J., Wang, Z., Wang, D. Z. & Hu, Z. Reasoning with language model is planning with world model. arXiv preprint arXiv:2305.14992 (2023). +77. Huang, W., Abbeel, P., Pathak, D. & Mordatch, I. Language models as zero-shot planners: Extracting actionable knowledge for embodied agents in International conference on machine learning (2022), 9118-9147. +78. Song, C. H., Wu, J., Washington, C., Sadler, B. M., Chao, W.-L. & Su, Y. Lm-planner: Few-shot grounded planning for embodied agents with large language models in Proceedings of the IEEE/CVF International Conference on Computer Vision (2023), 2998-3009. +79. Wang, Z., Cai, S., Chen, G., Liu, A., Ma, X. & Liang, Y. Describe, explain, plan and select: Interactive planning with large language models enables open-world multi-task agents. arXiv preprint arXiv:2302.01560 (2023). +80. Yao, S., Yu, D., Zhao, J., Shafran, I., Griffiths, T., Cao, Y. & Narasimhan, K. Tree of thoughts: Deliberate problem solving with large language models. Advances in Neural Information Processing Systems 36 (2024). +81. Parisi, A., Zhao, Y. & Fiedel, N. Talm: Tool augmented language models. arXiv preprint arXiv:2205.12255 (2022). +82. Schick, T., Dwivedi-Yu, J., Dessi', R., Raileanu, R., Lomeli, M., Hambro, E., Zettlemoyer, L., Cancedda, N. & Scialom, T. Toolformer: Language models can teach themselves to use tools. 
Advances in Neural Information Processing Systems 36, 68539-68551 (2023). +83. Qin, Y., Hu, S., Lin, Y., Chen, W., Ding, N., Cui, G., Zeng, Z., Zhou, X., Huang, Y., Xiao, C., et al. Tool learning with foundation models. ACM Computing Surveys 57, 1-40 (2024). +84. Cai, T., Wang, X., Ma, T., Chen, X. & Zhou, D. Large language models as tool makers. arXiv preprint arXiv:2305.17126 (2023). +85. Shinn, N., Cassano, F., Gopinath, A., Narasimhan, K. & Yao, S. Reflexion: Language agents with verbal reinforcement learning. Advances in Neural Information Processing Systems 36 (2024). +86. Yang, J., Jimenez, C. E., Wettig, A., Lieret, K., Yao, S., Narasimhan, K. & Press, O. Swe-agent: Agent-computer interfaces enable automated software engineering. arXiv preprint arXiv:2405.15793 (2024). +87. Qian, C., Dang, Y., Li, J., Liu, W., Chen, W., Yang, C., Liu, Z. & Sun, M. Experiential co-learning of software-developing agents. arXiv preprint arXiv:2312.17025 (2023). +88. Gottweis, J., Weng, W.-H., Daryin, A., Tu, T., Palepu, A., Sirkovic, P., Myaskovsky, A., Weissenberger, F., Rong, K., Tanno, R., et al. Towards an AI co-scientist. arXiv preprint arXiv:2502.18864 (2025). +89. Schmidgall, S., Su, Y., Wang, Z., Sun, X., Wu, J., Yu, X., Liu, J., Liu, Z. & Barsoum, E. Agent Laboratory: Using LLM Agents as Research Assistants. arXiv preprint arXiv:2501.04227 (2025). +90. Swanson, K., Wu, W., Bulaong, N. L., Pak, J. E. & Zou, J. The virtual lab: Ai agents design new sars-cov-2 nanobodies with experimental validation. bioRxiv, 2024-11 (2024). +91. Lu, C., Lu, C., Lange, R. T., Foerster, J., Clune, J. & Ha, D. The ai scientist: Towards fully automated open-ended scientific discovery. arXiv preprint arXiv:2408.06292 (2024). + +92. M. Bran, A., Cox, S., Schilter, O., Baldassari, C., White, A. D. & Schwaller, P. Augmenting large language models with chemistry tools. Nature Machine Intelligence, 1-11 (2024). +93. Boiko, D. A., MacKnight, R., Kline, B. & Gomes, G. 
Autonomous chemical research with large language models. Nature 624, 570-578 (2023). +94. Gao, S., Zhu, R., Kong, Z., Noori, A., Su, X., Ginder, C., Tsiligkaridis, T. & Zitnik, M. TxAgent: An AI Agent for Therapeutic Reasoning Across a Universe of Tools. arXiv preprint arXiv:2503.10970 (2025). +95. Aleixo, E. L., Colonna, J. G., Cristo, M. & Fernandes, E. Catastrophic forgetting in deep learning: A comprehensive taxonomy. arXiv preprint arXiv:2312.10549 (2023). + +# Supplementary Material + +# Version control + +# V0 (25 March 2025) $\rightarrow$ V1 + +- Upgraded the Agentic-Tx system's orchestrator from Gemini 2.0 to Gemini 2.5. This enhancement results in significant performance improvements in complex workflow orchestration, as detailed in Table 3. +- Added performance results of TxGemma-Predict and TxGemma-Chat (trained only on commercially licensed datasets) for binary classification (Table S.17), regression, and generation tasks (Table S.18). + +# A Summary + +Data details as listed in Section B: + +- Table S.1: Excluded TDC tasks and reasons for exclusion. +- Table S.2: Number of samples in training, validation, and test sets for all binary classification tasks. +- Table S.3: Number of samples in training, validation, and test sets for all regression and generation tasks. +- Table S.4: Descriptions of the binary classification tasks. +- Table S.5: Descriptions of the regression and generation tasks. +- Table S.6 Types of features in the processed TDC data along with illustrative examples. +Figure S.1: Distribution of TDC task sizes, aggregated over train, validation, and test sets. + +Method and modeling details as listed in Section C: + +- Table S.7 Examples of prompts for binary classification tasks. +- Table S.8 Examples of prompts for regression and generation tasks. +- Table S.9 Example of a 10-shot prompt for a binary classification task. +- Table S.10 Example of prompts for predicting adverse events in clinical trials. 
+- Table S.11 Example of Agentic-Tx response to a chemical preference question. +- Table S.12 List of tools available to Agentic-Tx. +- Figure S.2 Distribution of Tanimoto similarities for 10 nearest neighbors by dataset splits in the AMES task. +- Section C.1 Details about Wilcoxon signed-rank test used to assess model performance. + +Additional results as listed in Section D: + +- Additional prediction results for TxGemma (Section D.1) + +* Table S.13 Performance on binary classification tasks for specialist SOTA, base Gemma-2, and TxGemma-Predict models. +* Table S.14 Performance on regression and generation tasks for specialist SOTA, base Gemma-2, and TxGemma-Predict models. +* Table S.15 Performance on binary classification tasks for TxGemma-Predict, TxGemma-Chat, and Tx-LLM models. +* Table S.16 Performance on regression and generation tasks for TxGemma-Predict, TxGemma-Chat, and Tx-LLM models. +* Table S.17 Performance on binary classification tasks for TxGemma-Predict and TxGemma-Chat models trained only on datasets with commercial licenses. +* Table S.18 Performance on regression and generation tasks for TxGemma-Predict and TxGemma-Chat models trained only on datasets with commercial licenses. +* Figure S.4 Performance of TxGemma-27B-Predict compared to generalist and specialist SOTA models. +* Figure S.5 Comparison of TxGemma-27B-Predict with LlaSMol on select small molecule tasks. +* Figure S.6 Comparison of TxGemma-27B-Predict with MolE on select small molecule tasks. +* Figure S.11 Inference speed of TxGemma models at various sizes. + +* Figure S.12 Percent contamination for datasets and cosine similarity analysis. +* Figure S.13 Performance on contaminated datasets before and after filtering out contaminated datapoints. +* Figure S.16 Performance by feature type of all TxGemma-Predict sizes. +* Figure S.17 Comparison of TxGemma-Predict performances over different sizes and with Gemma-2 models. 
+* Figure S.18 Correlations of TxGemma-27B-Predict predictions for toxicity and clinical trial approval tasks. + +- Conversing with TxGemma-27B-Predict and TxGemma-27B-Chat (Section D.2) + +* Figure S.7 Comparison of TxGemma-27B-Predict, TxGemma-27B-Chat, and Gemma-2-27B on MMLU. +* Figure S.8 Example of a dialogue with TxGemma-27B-Predict about general topics. +* Figure S.9 Example of a multi-turn dialogue with TxGemma-27B-Predict about its predictions. +* Figure S.10 Example of a prompt format the enables TxGemma-Chat to provide reasoning for challenging tasks. + +- Additional Agentic-Tx Results (Section D.3) + +* Figure S.14 Agentic-Tx tool use frequencies for chemical preference and HLE benchmarks. +* Figure S.15 Agentic-Tx tool use frequency per question for chemical preference questions. + +- Proof-of-concept example using TxGemma (Section D.4) + +* Figure S.3 Illustration of a possible application of TxGemma to end-to-end therapeutic development. + +# B Data details + +This section provides a breakdown of the tasks used in our study, including information on excluded tasks and the size of training, validation, and test sets for binary classification, regression, and generation tasks. + +As previously mentioned, we excluded a small number of tasks from TDC for various reasons. Table S.1 provides an overview of the excluded tasks and the rationale behind their exclusion. The primary reasons for exclusion were the tasks' relevance to the study, limitations of LLMs, and specific data characteristics, such as the absence of clear metrics or redundancy. For instance, tasks like QM7b, QM8, and QM9, which focus on predicting quantum properties, were not directly relevant to the study's focus on therapeutic development. Similarly, IEDB Jespersen and PDB Jespersen were excluded due to their small size and the complexity of implementing token prediction, as opposed to binary classification, within an LLM framework. 
Tasks such as DrugBank DDI, TWOSIDES, and USPTO Catalyst posed challenges due to the large number of potential labels, making them difficult for LLMs to process effectively. MOSES, ZINC, and ChEMBL were excluded because they lacked well-defined evaluation metrics. Finally, USPTO 50K and USPTO Reaction were excluded as they either overlapped with or were subsets of the USPTO task. + +Tables S.2 and S.3 specify the number of samples in the training, validation, and test sets for the included binary classification, regression, and generation tasks, respectively. Substantial variability in task sizes across different tasks is shown in these tables. The binary classification tasks range from 196 to 1,406,988 samples, while the regression and generation tasks range from 345 to 775,767 samples. This variability highlights the diverse data availability landscape across various tasks. Figure S.1 provides a visual representation of the distribution of TDC task sizes, aggregated across train, validation, and test sets. For tasks encompassing multiple subtasks, like ToxCast, the task size is computed by summing the sizes of each individual dataset. + +![](images/3765c58d0679b30005a3a6896f3f4d67936c40efceb092bbd2821ab8663df4b7.jpg) +Figure S.1 | Distribution of TDC task sizes, aggregated over train, validation, and test sets. For tasks containing multiple datasets, such as ToxCast which contains data for more than 600 different assays, the task size is calculated by summing over the sizes for each dataset. + +Tables S.4 and S.5 provide a brief description of the tasks, as well as the types of inputs (e.g. protein, small molecules, etc.). These tasks are diverse and encompass many different aspects of development. Some tasks corresponding to gene-disease association or protein-protein interaction prediction are useful for early-stage development, in order to identify mechanisms of disease and relevant targets. 
Predictions of antibody affinity, drug-target interaction, high-throughput screening, drug synergy are useful for intermediate development steps that involve proposing candidate therapeutics based on their interaction with a target. Predictions of toxicity, pharmacokinetics, and developability are useful for filtering candidates down based on favorable druglike properties. Predictions of clinical trial outcome, reaction yields, retrosynthesis are useful for late-stage development where understanding the likelihood of clinical trial approval and manufacturing potential are critical. There are also tasks that are highly specific for particular therapeutics types, which include predictions of CRISPR repair, peptide-MHC binding, miRNA-Target interaction, and TCR-epitope binding. + +Binary classification tasks always output “(A)” or “(B)”, where “(A)” is a negative answer to the question which is specified in the prompt and “(B)” is a positive answer. Regression tasks output an integer between + +0 and 1000, which can be transformed back into the original task-specific label space. The output of the USPTO generation task is the SMILES string of the predicted molecules. Table S.6 lists the different types of inputs in the processed TDC data along with illustrative examples. + +Table S.1 | Excluded TDC tasks and reasons for exclusion. The tasks were excluded primarily due to their relevance to the study, limitations inherent to large language models (LLMs), and specific data characteristics, such as a lack of clear evaluation metrics or redundancy. + +
Task NameReason for Exclusion
QM7bPrediction of quantum properties is not closely related to therapeutic development.
QM8Prediction of quantum properties is not closely related to therapeutic development.
QM9Prediction of quantum properties is not closely related to therapeutic development.
IEDB JespersenAmount of data is small, and token prediction is more difficult to implement in a LLM than binary classification.
PDB JespersenAmount of data is small, and token prediction is more difficult to implement in a LLM than binary classification.
DrugBank DDILarge number of possible labels is difficult to implement in a LLM.
TWOSIDESLarge number of possible labels is difficult to implement in a LLM.
USPTO CatalystLarge number of possible labels is difficult to implement in a LLM.
MOSESNo clear metric.
ZINCNo clear metric.
ChEMBLNo clear metric.
USPTO 50KSubset of USPTO.
USPTO ReactionSame data as USPTO.
+ +Table S.2 | Number of samples in training, validation, and test sets for all binary classification tasks. The binary classification tasks range in size from a minimum of 196 samples (Carcinogens Lagunin) to a maximum of 1,406,988 samples (butkiewicz), highlighting the considerable variability in data availability across different tasks. The task type and split type are also indicated following the TDC classification and recommendation. + +
Task NameTask TypeSplit TypeTraining SizeValidation SizeTest Size
AMESToxicityScaffold5,0937281,457
BBB MartinsPharmacokineticsScaffold1,421203406
Bioavailability MaPharmacokineticsScaffold1,344192384
CYP1A2 VeithPharmacokineticsScaffold8,8051,2572,517
CYP2C19 VeithPharmacokineticsScaffold8,8651,2662,534
CYP2C9 Substrate CarbonMangelsPharmacokineticsScaffold46767135
CYP2C9 VeithPharmacokineticsScaffold8,4631,2102,419
CYP2D6 Substrate CarbonMangelsPharmacokineticsScaffold46567135
CYP2D6 VeithPharmacokineticsScaffold9,1911,3132,626
CYP3A4 Substrate CarbonMangelsPharmacokineticsScaffold46867135
CYP3A4 VeithPharmacokineticsScaffold8,6281,2332,467
Carcinogens LaguninToxicityScaffold1962856
ClinToxToxicityScaffold1,034147297
DILIToxicityScaffold3255496
HIA HouPharmacokineticsScaffold40358117
HIV*High-throughput screeningScaffold28,7884,1128,227
HuRIProtein-protein interactionCold-start45,8559873,694
MHC1 IEDB IMGT NielsenPeptide-MHC bindingRandom130,19018,59837,197
MHC2 IEDB JensenPeptide-MHC bindingRandom93,99713,42826,856
PAMPA NCATSPharmacokineticsScaffold1,423203408
Pgp BrocatelliPharmacokineticsScaffold851122245
SARSCOV2 3CLPro DiamondHigh-throughput screeningScaffold61688176
SARSCoV2 Vitro TouretHigh-throughput screeningScaffold1,038148298
SAbDab ChenDevelopabilityRandom1,686241482
Skin ReactionToxicityScaffold2824082
Tox21ToxicityScaffold54,5567,79015,600
ToxCastToxicityScaffold1,073,279153,099307,282
butkiewiczHigh-throughput screeningRandom1,406,988200,998401,997
hERGToxicityScaffold45766132
hERG KarimToxicityScaffold9,4111,3442,690
herg centralToxicityScaffold214,82530,68961,379
miRTarBasemiRNA-target interactionRandom559,59179,948159,889
phase1Clinical trial outcomeCold-start1,546258598
phase2Clinical trial outcomeCold-start5,7927161,282
phase3Clinical trial outcomeCold-start4,1255321,084
weberTCR-epitope bindingCold-start33,0134,7489,421
+ +* To predict whether compounds have Anti-HIV properties. + +Table S.3 | Number of samples in training, validation, and test sets for all regression and generation tasks. The regression and generation tasks vary significantly in size, ranging from a minimum of 345 samples (Protein SAbDab) to a maximum of 775,767 samples (USPTO). The task type and split type are also indicated following the TDC classification and recommendation. + +
Task NameTask TypeSplit TypeTraining SizeValidation SizeTest Size
BindingDB PatentDrug-target interactionTemporal146,80036,63049,028
BindingDB ic50Drug-target interactionCold-start375,1277,53131,495
BindingDB kdDrug-target interactionCold-start19,0343762,321
BindingDB kiDrug-target interactionCold-start57,6561,1894,709
Buchwald HartwigReaction yieldsRandom2,768396791
Caco2 WangPharmacokineticsScaffold63791182
Clearance Hepatocyte AZPharmacokineticsScaffold848122243
Clearance Microsome AZPharmacokineticsScaffold770111221
DAVISDrug-target interactionCold-start12,4552661,064
DisGeNETGene-disease associationRandom39,4255,62111,200
DrugComb BlissDrug synergyCombination207,77229,61859,708
DrugComb CSSDrug synergyCombination207,77229,61859,708
DrugComb HSADrug synergyCombination207,77229,61859,708
DrugComb LoeweDrug synergyCombination207,77229,61859,708
DrugComb ZIPDrug synergyCombination207,77229,61859,708
GDSC1Drug responseRandom124,11717,73135,462
GDSC2Drug responseRandom64,8929,27018,541
Half Life ObachPharmacokineticsScaffold46567135
KIBADrug-target interactionCold-start59,3261,0424,524
LD50 ZhuToxicityScaffold5,1687391,478
LeenayCRISPR repairRandom5,3257601,520
Lipophilicity AstraZenecaPharmacokineticsScaffold2,940420840
OncoPolyPharmacologyDrug synergyCombination16,0142,3314,707
PPBR AZPharmacokineticsScaffold1,952279559
Protein SAbDabAntibody affinityRandom3454999
Solubility AqSolDBPharmacokineticsScaffold6,9889981,996
TAPDevelopabilityRandom845120240
USPTORetrosynthesisRandom775,767110,824221,648
USPTO YieldsReaction yieldsRandom597,54685,364170,728
VDss LombardoPharmacokineticsScaffold791113226
+ +Table S.4 | Inputs and task descriptions for binary classification tasks. All output responses are either (A) for negative or (B) for positive. + +
Task NameInputDescription
AMESSmall moleculeGiven a drug SMILES, predict whether it is mutagenic.
BBB MartinsSmall moleculeGiven a drug SMILES, predict whether it can cross the blood-brain barrier.
Bioavailability MaSmall moleculeGiven a drug SMILES, predict whether it is orally available.
CYP1A2 VeithSmall moleculeGiven a drug SMILES, predict whether it inhibits CYP1A2.
CYP2C19 VeithSmall moleculeGiven a drug SMILES, predict whether it inhibits CYP2C19.
CYP2C9 Substrate CarbonMangelsSmall moleculeGiven a drug SMILES, predict whether it is a substrate to CYP2C9.
CYP2C9 VeithSmall moleculeGiven a drug SMILES, predict whether it inhibits CYP2C9.
CYP2D6 Substrate CarbonMangelsSmall moleculeGiven a drug SMILES, predict whether it is a substrate to CYP2D6.
CYP2D6 VeithSmall moleculeGiven a drug SMILES, predict whether it inhibits CYP2D6.
CYP3A4 Substrate CarbonMangelsSmall moleculeGiven a drug SMILES, predict whether it is a substrate to CYP3A4.
CYP3A4 VeithSmall moleculeGiven a drug SMILES, predict whether it inhibits CYP3A4.
Carcinogens LaguninSmall moleculeGiven a drug SMILES, predict whether it is a carcinogen.
ClinToxSmall moleculeGiven a drug SMILES, predict whether it is toxic.
DILISmall moleculeGiven a drug SMILES, predict whether it can cause liver injury.
HIA HouSmall moleculeGiven a drug SMILES, predict whether it is absorbed in the human intestine.
HIV*Small moleculeGiven a drug SMILES, predict whether it has anti-HIV activity.
HuRIProteinGiven the amino acid sequences of two proteins, predict whether the proteins interact.
MHC1 IEDB IMGT NielsenProteinGiven the amino acid of the peptide and pseudo amino acid of MHC 1, predict whether the peptide binds to the MHC.
MHC2 IEDB JensenProteinGiven the amino acid of the peptide and pseudo amino acid of MHC 2, predict whether the peptide binds to the MHC.
PAMPA NCATSSmall moleculeGiven a drug SMILES, predict whether it is permeable in a PAMPA assay.
Pgp BroccatelliSmall moleculeGiven a drug SMILES, predict whether it inhibits Pgp.
SARSCOV2 3CLPro DiamondSmall moleculeGiven a drug SMILES, predict whether it binds SARS-CoV-2 3CL protease.
SARSCoV2 Vitro TouretSmall moleculeGiven a drug SMILES, predict whether it inhibits SARS-CoV-2 replication.
SAbDab ChenProteinGiven an antibody heavy chain and light chain sequence, predict whether it is developable.
Skin ReactionSmall moleculeGiven a drug SMILES, predict whether it can cause skin reaction.
Tox21Small moleculeGiven a drug SMILES, predict whether it is toxic in various assays.
ToxCastSmall moleculeGiven a drug SMILES, predict whether it is toxic in various assays.
butkiewiczSmall moleculeGiven a drug SMILES, predict whether it is active against various proteins.
hERGSmall moleculeGiven a drug SMILES, predict whether it blocks hERG.
hERG KarimSmall moleculeGiven a drug SMILES, predict whether it inhibits hERG.
herg centralSmall moleculeGiven a drug SMILES, predict whether it inhibits hERG.
miRTarBaseNucleic acid & proteinGiven the miRNA mature and target amino acid, predict whether they interact.
phase1Small molecule & diseaseGiven a drug SMILES and disease, predict whether the phase 1 trial will be approved.
phase2Small molecule & diseaseGiven a drug SMILES and disease, predict whether the phase 2 trial will be approved.
phase3Small molecule & diseaseGiven a drug SMILES and disease, predict whether the phase 3 trial will be approved.
weberProteinGiven the amino acid of the epitope and a T-cell receptor (amino acid of the hypervariable CDR3 loop), predict whether the epitope binds to the TCR.
+ +* To predict whether compounds have Anti-HIV properties. + +Table S.5 | Inputs and task descriptions for regression and generation tasks. Regression task outputs are integers between 0 and 1000, which represents a binned transformation of the original numeric label. On evaluation, the integer output is transformed back into the original numeric label space. For the USPTO generation task, the output is the SMILES string of the predicted set of small molecules. + +
Task NameInputDescription
BindingDB PatentProtein & small moleculeGiven the target amino acid and drug SMILES, predict their binding affinity.
BindingDB ic50Protein & small moleculeGiven the target amino acid and drug SMILES, predict their IC50.
BindingDB kdProtein & small moleculeGiven the target amino acid and drug SMILES, predict their Kd.
BindingDB kiProtein & small moleculeGiven the target amino acid and drug SMILES, predict their Ki.
Buchwald HartwigSmall moleculeGiven a product, a catalyst, and a reactant SMILES, predict the reaction yield.
Caco2 WangSmall moleculeGiven a drug SMILES, predict the cell effective permeability.
Clearance Hepatocyte AZSmall moleculeGiven a drug SMILES, predict the activity of hepatocyte clearance.
Clearance Microsome AZSmall moleculeGiven a drug SMILES, predict the activity of microsome clearance.
DAVISProtein & small moleculeGiven the target amino acid and drug SMILES, predict their binding affinity.
DisGeNETProtein & diseaseGiven the disease description and the amino acid of the gene, predict their association.
DrugComb BlissSmall molecule & cell lineGiven two drug SMILESs and a cell line description, predict the drug synergy level.
DrugComb CSSSmall molecule & cell lineGiven two drug SMILESs and a cell line description, predict the drug synergy level.
DrugComb HSASmall molecule & cell lineGiven two drug SMILESs and a cell line description, predict the drug synergy level.
DrugComb LoeweSmall molecule & cell lineGiven two drug SMILESs and a cell line description, predict the drug synergy level.
DrugComb ZIPSmall molecule & cell lineGiven two drug SMILESs and a cell line description, predict the drug synergy level.
GDSC1Small molecule & cell lineGiven a drug SMILES and a cell line description, predict the drug sensitivity level.
GDSC2Small molecule & cell lineGiven a drug SMILES and a cell line description, predict the drug sensitivity level.
Half Life ObachSmall moleculeGiven a drug SMILES, predict the half life duration.
KIBAProtein & small moleculeGiven the target amino acid and drug SMILES, predict their binding affinity.
LD50 ZhuSmall moleculeGiven a drug SMILES, predict its LD50 toxicity.
LeenayNucleic acidGiven a GuideSeq sequence, predict various properties.
Lipophilicity AstraZenecaSmall moleculeGiven a drug SMILES, predict the lipophilicity.
OncoPolyPharmacologyCell line & small moleculeGiven two drug SMILESs and a cell line description, predict the drug synergy level.
PPBR AZSmall moleculeGiven a drug SMILES, predict the plasma protein binding rate.
Protein SAbDabProteinGiven the amino acid of the antibody and antigen, predict the binding affinity.
Solubility AqSolDBSmall moleculeGiven a drug SMILES, predict the activity of solubility.
TAPProteinGiven an antibody heavy chain and light chain sequence, predict its CDR length.
USPTOSmall moleculeGiven the product SMILES, generate the reactant SMILESs.
USPTO YieldsSmall moleculeGiven a catalyst SMILES, reactant SMILES, and product SMILES, predict the yield.
VDss LombardoSmall moleculeGiven a drug SMILES, predict the volume of distribution.
+ +Table S.6 | Types of drugs and targets found in our data. Features found in our data as well as their textual representation and an illustrative example. Protein sequences are divided into several subtypes: some proteins and peptides are represented using their full amino acid sequence whereas MHC molecules are represented using the amino acid pseudo-sequences that only use residues in contact with a peptide, and TCRs only use CDR3 hypervariable loops. + +
Representation TypeRepresentationExample
Small MoleculesSMILES stringCN1C(=O)CN=C(C2=CCCCC2)c2cc(Cl)ccc21
Amino Acid: Proteins and peptidesAmino acid sequencesQLADETLLKV
Amino Acid: MHC moleculesPseudo-sequences †YFAMYGEKVAHTHVDTLYVRYHYYTWAEWAYTWY
Amino Acid: T cell receptorsCDR3 hypervariable loopsCSASEGTSSYEQYF
Nucleic acidNucleotide sequenceACAGCCCAGCAGUUUAUCACGGG
DiseaseEnglish textChronic myeloproliferative disease
Cell LineEnglish textSNU-1, stomach cell sourced from cancer
+ +† Only for residues in contact with a peptide. + +# C Method details + +This section elaborates on the modeling choices employed in the development of TxGemma. Tables S.7 and S.8 illustrate prompts used for binary classification, regression, and generation tasks, showcasing the input structure for the model including the instructions and context provided to the model. Table S.9 provide a concrete example of few-shot prompting applied to a binary classification task using 10 examples with nearest-neighbor shots. Each dataset in our data is structured as a text prompt, consisting of instructions, context, a question, and the corresponding answer. To provide relevant background, we created 2-3 sentence contexts based on TDC dataset descriptions and literature searches. Prompts used for predicting adverse events in clinical trials based on the TrialBench dataset [1] are shown in Table S.10. To illustrate the reasoning process of Agentic-Tx, Table S.11 provides an example of the steps taken to answer a chemical preference question from ChemBench. Table S.12 also provides a comprehensive list of the tools available of Agentic-Tx. Section C.1 provides details of the Wilcoxon signed-rank test used to assess the performance of our models across all tasks. + +We utilize random data points from the training set for few-shot learning during training. Although we use nearest neighbor shots for evaluation, we opt for random shots during training due to the higher intra-set similarity observed within the training data compared to between training and test sets, as illustrated in Figure S.2. + +![](images/a97b57e1e6b69f48df9b46566fd0c5232a634bcaa152ee1d7db532c7273836dd.jpg) +Figure S.2 | Distribution of the Tanimoto similarities for the 10 nearest neighbors in the AMES task. Nearest neighbors are calculated from the training set for training and validation sets, and from both the training and validation sets for the test set. 
+ +# C.1 Aggregated method comparison + +For a pair of performances $(x_{i},y_{i})$ of a task $i$ , the test statistic of the Wilcoxon signed-rank test is calculated as the minimum of the positive-rank sum $(W^{+})$ and the negative-rank sum $(W^{-})$ , + +$$ +W ^ {+} = \sum_ {X _ {i} > 0} R _ {i} \tag {1} +$$ + +$$ +W ^ {-} = \sum_ {X _ {i} < 0} R _ {i} \tag {2} +$$ + +where $X_{i} = x_{i} - y_{i}$ and $R_{i}$ is the rank of $|x_{i} - y_{i}|$ . In order to account for the differences in magnitudes for MAE and MSE metrics, we normalized all performances by the mean of the performances from both models. We also reversed the sign of MAEs and MSEs because lower MAEs and MSEs correspond to better performances. + +Table S.7 | Example of prompts for binary classification tasks. + +Instructions: Answer the following question about drug properties. + +Context: As a membrane separating circulating blood and brain extracellular fluid, the blood-brain barrier (BBB) is the protection layer that blocks most foreign drugs. Thus the ability of a drug to penetrate the barrier to deliver to the site of action forms a crucial challenge in development of drugs for central nervous system. + +Question: Given a drug SMILES string, predict whether it + +(A) does not cross the BBB (B) crosses the BBB + +Drug SMILES: $\mathrm{CN1C(=O)CN = C(C2 = CCCCC2)c2cc(Cl)ccc21}$ + +Answer: (B) + +Instructions: Answer the following question about peptide-MHC binding. + +Context: In the human body, T cells monitor the existing peptides and trigger an immune response if the peptide is foreign. To decide whether or not if the peptide is not foreign, the peptide must bind to a major histocompatibility complex (MHC) molecule. Therefore, predicting peptide-MHC binding affinity is pivotal for determining immunogenicity. In some experiments, the peptide binding is measured against cells that express multiple MHCs, so the peptide could be binding any one of the possible MHCs. 
Class 1 MHC molecules bind to peptides that are usually 8-14 amino acids long and activate CD8 T cells. + +Question: Given the amino acid sequence of the peptide and possible pseudo amino acid sequences of MHC 1, predict whether the peptide + +(A) does not bind to any of the MHCs (B) binds to any of the MHCs + +Peptide amino acid sequence: QLADETLLKV + +Possible MHC pseudosequences: YFAMYGEKAVTHVDTLYVRYHYTTYEAWAYTWY + +Answer: (B) + +Instructions: Answer the following question about miRNA protein interactions. + +Context: MicroRNAs (miRNAs) are, small non-coding RNAs with 18-25 nucleotides, which are central regulators at the post-transcriptional level in both animals and plants. Perfect or near-perfect complementary binding of miRNAs and their target mRNA negatively regulates gene expression by accelerating mRNA degradation or suppressing mRNA translation. + +Question: Given the miRNA mature sequence and target amino acid sequence, predict whether + +(A) the miRNA and target do not interact (B) the miRNA and target interact + +miRNA sequence: UUCCUGUCAGCCGUGGGUGCC + +Target amino acid sequence: MSVNMDELRHQVMINQFVLAAGCAADQAKQLLQAAHWQFETALSTFFQET-NIPNSHHHHQMMCTPSNTPATPPNFPDALAMFSKLRASEGLQSSNSPMTAAACSPANFSPFWASSPPSHQAPWIP-PSSPTTFHLHRPQPTWPPGAQQGGAQQKAMAAMDGQR + +Answer: (A) + +Instructions: Answer the following question about clinical trials. + +Context: Clinical trial is the most time and cost-consuming step in the drug discovery process. Phase 1 clinical trials test the safety and basic properties of a new drug or treatment in a small group of people for the first time. Optimizing and designing trials with machine learning could drastically lead to the speedup of delivery of life-saving therapeutics to patients. Clinical trial outcome prediction is a machine learning task that aims to forecast the outcome of clinical trials, such as the approval rate of a drug or treatment. 
It utilizes various clinical trial features, including the drug's molecular structure and patient disease. + +Question: Given a drug SMILES string and disease, predict if the phase 1 trial + +(A) would not be approved (B) would be approved + +Drug SMILES: COC1=NC(N)=NC2=C1N=CN2[C@@H]1O[C@H](CO)[C@@H](O)[C@@H]1O + +Disease: Chronic myeloproliferative disease + +Answer: (A) + +Table S.8 | Example of prompts for regression and generation tasks. + +Instructions: Answer the following question about drug properties. + +Context: The human colon epithelial cancer cell line, Caco-2, is used as an in vitro model to simulate the human intestinal tissue. The experimental result on the rate of drug passing through the Caco-2 cells can approximate the rate at which the drug permeates through the human intestinal tissue. + +Question: Given a drug SMILES string, predict its normalized Caco-2 cell effective permeability from 000 to 1000, where 000 is minimum permeability and 1000 is maximum permeability. + +Drug SMILES: $\mathrm{O} = \mathrm{C}(\mathrm{O})\mathrm{{COC}}\left( { = \mathrm{O}}\right) \mathrm{{Cc}}1\text{ccc}\mathrm{{cc}}1\mathrm{{Nc}}1\mathrm{{c}}\left( \mathrm{{Cl}}\right) \mathrm{{ccc}}1\mathrm{{Cl}}$ + +Answer: 788 + +Instructions: Answer the following question about drug responses. + +Context: The same drug compound could have various levels of responses in different patients. To design drug for individual or a group with certain characteristics is the central goal of precision medicine. In experiments, IC50s of drugs were measured against cancer cell lines. + +Question: Given a drug SMILES string and a cell line description, predict the normalized drug sensitivity from 000 to 1000, where 000 is minimum drug sensitivity and 1000 is maximum drug sensitivity. 
+ +Drug SMILES: $\mathrm{CN1C = C(C2 = CC = CC = C21) / C = C\backslash 3 / C4 = C(C = CC = N4)NC3 = O}$ + +Cell line description: SNU-1, stomach cell sourced from cancer + +Answer: 615 + +Instructions: Answer the following question about drug target interactions. + +Context: Drug-target binding is the physical interaction between a drug and a specific biological molecule, such as a protein or enzyme. This interaction is essential for the drug to exert its pharmacological effect. The strength of the drug-target binding is determined by the binding affinity, which is a measure of how tightly the drug binds to the target. Kd is the dissociation constant of a drug-target complex. It is the concentration of drug at which half of the drug-target complexes have dissociated. A lower Kd value indicates a stronger binding affinity. + +Question: Given the target amino acid sequence and compound SMILES string, predict their normalized binding affinity Kd from 000 to 1000, where 000 is minimum Kd and 1000 is maximum Kd. + +Drug SMILES: $\mathrm{O = S(=O)(O)c1cccc2ccc(Nc3cccccc)3c12}$ + +Target amino acid sequence: MATVQQLEGRWRLVDSKGFDEYMKELGVIALRKMGAMKPDCIITCDGKNLTIKTESTLKITTQFSCTLGEKFETTADGRKTQTVCNFTDGALVHQWEWDGKESTITRKLKDGLVVECVMNNVTCTRIYEKVE + +Answer: 397 + +Instructions: Answer the following question about reactions. + +Context: Retrosynthesis is the process of finding a set of reactants that can synthesize a target molecule, i.e., product, which is a fundamental task in drug manufacturing. The target is recursively transformed into simpler precursor molecules until commercially available "starting" molecules are identified. In a data sample, there is only one product molecule, reactants can be one or multiple molecules. + +Question: Given a product SMILES string, predict the reactant SMILES string. 
+ +Product SMILES: [CH2:12]1[C:7]2([CH2:6][CH2:5][O:15][CH2:1][CH2:8]2)[CH2:13][CH2:14][O:10][C:11]1=[O:17] + +Answer: [CH:1]12B[CH:5]([CH2:6][CH2:7][CH2:8]1)CCC2.[O:10]1[CH2:14][CH2:13][CH2:12] [CH2:11]1.[OH:15].[Na+].[OH:17]O.CI + +Table S.9 | Example of a 10-shot prompt for a binary classification task. Shots are selected from nearest neighbors in the combined training and validation set (not the test set). + +Instructions: Answer the following question about drug properties. + +Context: As a membrane separating circulating blood and brain extracellular fluid, the blood-brain barrier (BBB) is the protection layer that blocks most foreign drugs. Thus the ability of a drug to penetrate the barrier to deliver to the site of action forms a crucial challenge in development of drugs for central nervous system. + +Question: Given a drug SMILES string, predict whether it (A) does not cross the BBB (B) crosses the BBB + +Drug SMILES: $\mathrm{CN1C(=O)CN = C(c2cccccc)2c2cc(Cl)ccc21}$ + +Answer: (B) + +Drug SMILES: $\mathrm{CN1C(=O)CN = C(c2cccccc2F)c2cc(Cl)ccc21}$ + +Answer: (B) + +Drug SMILES: $\mathrm{CN1C(=S)CN = C(c2cccccc)2c2cc(Cl)ccc21}$ + +Answer: (B) + +Drug SMILES: CP(C)(=O)CN1C(=O)CN=C(c2cccccc2)c2cc(Cl)ccc21 + +Answer: (B) + +Drug SMILES: $\mathrm{CN1C(=O)CN = C(c2cccccc)2c2cc([N + ](=O)[O - ])ccc21}$ + +Answer: (B) + +Drug SMILES: CCN(CC)CCN1C(=O)CN=C(c2cccccc2F)c2cc(Cl)ccc21 + +Answer: (B) + +Drug SMILES: $\mathrm{O} = \mathrm{C}1\mathrm{{CN}} = \mathrm{C}\left( {c2\text{ccc}cc2}\right) c2\mathrm{{cc}}\left( \mathrm{{Cl}}\right) \mathrm{{ccc}}2\mathrm{\;N}1\mathrm{{CC}}1\mathrm{{CC}}1$ + +Answer: (B) + +Drug SMILES: C#CCN1C(=O)CN=C(c2cccc2)c2cc(Cl)ccc21 + +Answer: (B) + +Drug SMILES: $\mathrm{O} = \mathrm{C}1\mathrm{{CN}} = \mathrm{C}\left( {\mathrm{c}2\text{ccc} : 2}\right) \mathrm{c}2\mathrm{{cc}}\left( \mathrm{{Cl}}\right) \mathrm{{ccc}}2\mathrm{\;N}1\mathrm{{CC}}\left( \mathrm{F}\right) \left( \mathrm{F}\right) \mathrm{F}$ + +Answer: (B) + +Drug SMILES: 
$\mathrm{CCS}(\mathrm{=O})(\mathrm{=O})\mathrm{CCN1C}(\mathrm{=O})\mathrm{CN} = \mathrm{C}(\mathrm{c2cccccc2F})\mathrm{c2cc(Cl)ccc21}$ + +Answer: (B) + +Drug SMILES: $\mathrm{CN1C(=O)CN = C(C2 = CCCCCC2)c2cc(Cl)ccc21}$ + +Answer: (B) + +Table S.10 | Example of prompts for predicting adverse events in clinical trials. The top prompt only provides drug SMILES strings while the bottom prompt also includes textual information about the clinical trial. + +From the following information about a clinical trial, predict whether it would have an adverse event. + +```javascript +Drug: CC[C@H]1[C@@H](COC2=C3C=C(OC)C(=CC3=CC=N2)C(N)=O)NC(=O)[C@H]1F.[H][C@@]12CC[C@H](O)[C@@]1(C)CC[C@]1([H])C3=C(CC[C@@]21[H])C=C(O)C=C3 +``` + +Answer: No + +From the following information about a clinical trial, predict whether it would have an adverse event. + +Title: A Study To Estimate The Effect of PF-06650833 On The Pharmacokinetics (PK) of Oral Contraceptive (OC) Summary: This is a Phase 1, open label, fixed sequence study of the effect of multiple dose PF-06650833 on single dose OC PK in healthy female subjects. + +Phase: 1 + +Disease: Healthy + +Minimum age: 18 Years + +Maximum age: 60 Years + +Healthy volunteers: Accepts Healthy Volunteers + +Interventions: $400\mathrm{mg}$ by mouth (PO) Once daily (QD) for 11 days; Single dose of Oral tablet containing 30 ug EE and 150 ug of LN + +```javascript +Drug: CC[C@H]1[C@@H](COC2=C3C=C(OC)C(=CC3=CC=N2)C(N)=O)NC(=O)[C@H]1F.[H][C@@]12CC[C@H](O)[C@@]1(C)CC[C@]1([H])C3=C(CC[C@@]21[H])C=C(O)C=C3 +``` + +Answer: No + +Table S.11 | Example of Agentic-Tx ChemBench chemical preference question answering. + +
Example of Chembench Chemical Preference
QuestionConsider the following two drug candidates: +A. N#Cc1c(NC(=O)c2cc([N+](=O)[O-])ccc2N2CCOCC2)sc2c1CCCC2 +B. O=C(C=Cc1ccccn1)c1ccccc1. +Which is more preferable for further development?
Thought 1First, I need to obtain more information about each drug candidate. I will use the SMILES to Description tool to get detailed descriptions of each molecule from their SMILES strings, starting with candidate A.
Action 1SMILES to Description +Input SMILES: N#Cc1c(NC(=O)c2cc([N+](=O)[O-])ccc2N2CCOCC2)sc2c1CCCC2
Observation 1PubChem CID: 3934361 +Molecular Formula: C20H20N4O4S +Molecular Weight: 412.5 +Charge: 0 +IUPAC Name: N-(3-cyano-4,5,6,7-tetrahydro-1-benzothiophen-2-yl)-2-morpholin-4-yl-5-nitrobenzamide XLogP: 3.8 +Synonyms: MLS000335194, CHEMBL1549645, HMS2597A10, HMS3379H10, AKOS001044982, SMR000249952, SR-01000056848, SR-01000056848-1, Z27367728 +(Summarized) The provided information describes a single molecule (PubChem CID 3934361) with a cyano-tetrahydro-benzothiophene core, a morpholino-nitrobenzamide substituent, and an XLogP of 3.8, indicating its lipophilicity.
Thought 2Now, I will use the SMILES to Description tool to get detailed descriptions of candidate B from its SMILES string. After that, I will compare the information obtained for both candidates to determine which is more preferable for further development.
Action 2SMILES to Description +Input SMILES: O=C(C=Cc1ccccn1)c1ccccc1
Observation 2PubChem CID: 219207 +Molecular Formula: C14H11NO +Molecular Weight: 209.24 +Charge: 0 +IUPAC Name: 1-phenyl-3-pyridin-2-ylprop-2-en-1-one XLogP: 2.7 +Synonyms: 3-(2-PYRIDYL)-ACRYLOPHENONE, MLS002637493, azachalcone, CHEMBL1717486, DTXSID601279307, HMS3079I05, SMR001547031, 1-Phenyl-3-(2-pyridinyl)-2-propen-1-one +(Summary) The molecule (B) represented by the SMILES O=C(C=Cc1ccccn1)c1ccccc1, also known as 1-phenyl-3-pyridin-2-ylprop-2-en-1-one, has a molecular weight of 209.24 g/mol and a calculated XLogP value of 2.7.
Observation 2
Thought 3Based on the descriptions, candidate B (XLogP = 2.7) is less lipophilic than candidate A (XLogP = 3.8). Lower lipophilicity can often be associated with better absorption and distribution properties. I will now use the ClinicalTox tool to assess the clinical toxicity of candidate B.
Action 3ClinicalTox +Input SMILES: O=C(C=Cc1ccccn1)c1ccccc1
Observation 3Context: Humans are exposed to a variety of chemicals through food, household products, and medicines, some of which can be toxic, leading to over 30% of promising pharmaceuticals failing in human trials due to toxicity. Toxic drugs can be identified from clinical trials that failed due to toxicity, while non-toxic drugs can be identified from FDA approval status or from clinical trials that report no toxicity. Prediction returned: O=C(C=Cc1ccccn1)c1ccccc1 is not toxic! +(Summary) Based on the provided information, drug candidate B (O=C(C=Cc1ccccn1)c1ccccc1) is predicted to be non-toxic, suggesting it might be more preferable for further development.
Observation 3
Final ResponseCandidate B is more preferable for further development. Here's why: +-Lipophilicity (XLogP): Candidate B has a lower XLogP value (2.7) compared to Candidate A (3.8). Lower lipophilicity is generally favored in drug development. Excessive lipophilicity (high XLogP) can lead to problems like poor solubility, increased risk of off-target binding and toxicity, and difficulties in achieving good absorption and distribution in the body. A value closer to the range of many oral drugs is preferred. +-Toxicity: The clinical toxicity tool states that candidate B is predicted to be non-toxic.
+ +Table S.12 | Descriptions of tools used by Agentic-Tx. + +
Tool NameDescription
ToxCastUses TxGemma to predict the toxicity of a given drug (SMILES string) in various ToxCast assays based on the provided context. Returns results indicating whether the drug is toxic or not in each selected assay.
ClinicalToxUses TxGemma to predict the clinical toxicity of a given drug (SMILES string) for humans. Returns a result indicating whether the drug is predicted to be toxic or not.
ChatAllows conversational interaction with TxGemma-Chat. Enables posing therapeutics-related questions and receiving responses.
MutagenicityUses TxGemma to predict whether a given drug (SMILES) is mutagenic based on the Ames test. Returns a result indicating if the drug is mutagenic or not.
IC50Uses TxGemma to predict the normalized IC50 between a drug (SMILES) and a target protein (amino acid sequence). Returns an IC50 value, with lower values suggesting potent inhibition.
Phase 1 TrialUses TxGemma to predict the approval outcome of a Phase 1 clinical trial for a drug (SMILES) against a specified disease. Returns a result indicating whether the trial would be approved or not.
Wikipedia SearchSearches Wikipedia for a given text query. Returns the top matching article's title, link, and a short summary.
PubMed SearchQueries PubMed for scientific articles based on a search text. Returns metadata (PMID, title, authors, journal, date, abstract) for the top few articles.
Web SearchPerforms a general web search. Returns titles, links, and snippets for the top search results.
HTML FetchFetches the raw HTML content of a given URL. Useful for inspecting webpage details.
SMILES to DescriptionRetrieves molecular information from PubChem for a given SMILES string. Returns properties like PubChem CID, molecular formula, IUPAC name, XLogP, and synonyms.
SMILES TherapyRetrieves therapeutic information (ChEMBL ID, mechanisms of action, drug indications, ATC classifications) for a drug given its SMILES string.
Molecule ToolProvides molecule-related functions: searching for compounds by name (returns properties and IDs) and converting between molecular representations (InChI, SMILES, InChIKey, Mol).
Molecule ConvertConverts a molecule's representation from one type to another (e.g., SMILES to InChI).
Gene SequenceRetrieves amino acid sequences for a given gene name and organism. Searches NCBI Nucleotide, fetches records, and translates DNA to protein sequences.
Gene DescriptionRetrieves descriptive information about a gene from NCBI Gene, including official symbol, full name, description, and summary.
BlastPRuns a BLASTP search against NCBI databases for a given amino acid sequence. Returns hits with gene names, organisms, and accessions.
Protein DescriptionProvides descriptive information (organism, definition, accession) for a protein, either by name or amino acid sequence. Uses NCBI Protein database or BLASTP.
+ +# D Additional results + +# D.1 TxGemma-Predict performance + +Figure S.4 compares TxGemma-27B-Predict with previous SOTA models, taking into account that Tx-LLM M achieved SOTA performance on many tasks. We provide detailed results tables for binary classification tasks in Table S.13 (comparing against specialist SOTA and base models) and Table S.15 (comparing against TxGemma-Chat and Tx-LLM), and for regression and generation tasks in Table S.14 (comparing against specialist SOTA and base models) and Table S.16 (comparing against TxGemma-Chat and Tx-LLM). Tables S.17 and S.18 list the performances of released TxGemma models trained only on datasets with commercial licenses. Figures S.5 and S.6 compares TxGemma-27B-Predict with LlaSMol and MolE, models specialized for small molecules, on small molecule tasks. Figure S.12 plots the percentage of tasks that contain contaminated datapoints overlapping with the Gemma-2 pretraining data, the percent of contaminated datapoints for these tasks, and Figure S.13 shows the results of TxGemma-27B-Predict after filtering contaminated datapoints out. We observe that most tasks have no contamination, and filtering these datapoints out does not negatively impact TxGemma-27B-Predict performance. Figure S.16 plots performances for particular feature types across multiple model sizes, showing that the integration of SMILES strings and textual information is consistent. Figure S.17 plots performances over all tasks for comparisons of model size and domain fine-tuning, showing that these variables are significant. Figure S.18 shows that TxGemma-27B-Predict toxicity and clinical trial approval predictions are correlated, likely because toxicity in an important component of trial approval. Figure S.11 plots the inference speed, normalized by the number of chips used for serving, for all model sizes. 
+ +# D.2 Conversing with TxGemma-27B-Predict and TxGemma-27B-Chat + +Figure S.8 illustrates an example of providing a prompt to TxGemma-27B-Predict that is not in the processed data format. TxGemma-27B-Predict is able to provide a coherent response in a manner similar to the general LLMs. Figure S.9 illustrates an example of first providing a prompt to TxGemma-27B-Predict in the processed format and asking follow-up questions in subsequent turns. In the second turn, instructing the model to not in the processed data format is able to elicit a reasonable but succinct response. However, the third turn leads to the model answering in the processed data format, highlighting the difficulty of multi-turn dialogue after training only on the processed TDC data. Figure S.7 plots the performance of TxGemma-27B-Chat on the MMLU benchmark in comparison with both Gemma-2-27B and TxGemma-27B-Predict. TxGemma-27B-Chat performs similarly to Gemma-2-27B on MMLU while TxGemma-27B-Predict scores much lower. Figure S.10 shows an example of using a specific prompting structure with TxGemma-27B-Chat to elicit reasoning on a more challenging task of clinical trial approval. If this prompting structure is not used, the model refuses to provide reasoning. + +# D.3 Agentic-Tx Tool Use Analysis + +Figure S.14 shows the tool usage frequency for different benchmarks, illustrating that Agentic-Tx dynamically adjusts its tool usage to suit the problem. Figure S.15 shows the most frequent tools used per question for chemical preference questions, showing consistent usage of molecule-based tools. + +# D.4 Proof-of-concept use of TxGemma for end-to-end therapeutic development + +In Figure S.3, we illustrate a simplified example of how TxGemma might be helpful in identifying a drug for ovarian cancer. In this example, we chose to directly prompt TxGemma, rather than using Agentic-Tx, to strictly isolate potential information leakage introduced by web search, which is outside of our training data. 
This approach allows us to examine the model's inherent capabilities, though we acknowledge that a full agent-based workflow is a plausible extension. + +We initially use the DisGeNET prompt to identify an ovarian cancer-associated target gene from a short list of genes including PIK3CA, JAK2, RET. TxGemma-27B-Predict predicts that PIK3CA, a gene not found in the training set which is known to be mutated in ovarian cancer [2], has an association score of 0.7 with ovarian cancer. This association score is nearly 2.5 standard deviations above the mean score ( $\mu = 0.37$ , $\sigma = 0.13$ ), indicating a strong association. JAK2 and RET share an association score of 0.3 which is below + +the mean score. We then used TxGemma-27B-Predict to select a potential therapeutic from a molecule shortlist, prioritizing predicted $\mathrm{IC}_{50}$ against the E545K mutant (an oncogenic mutation [3]), toxicity, and clinical trial success. Our manually curated shortlist of drugs, unseen to the model during training, includes two existing cancer therapies, alpelisib and afatinib, and a novel molecule which we randomly generated. Both afatinib $(1.02\mu \mathrm{M}\mathrm{IC}_{50})$ and the novel molecule $(10.2\mu \mathrm{M}\mathrm{IC}_{50})$ exhibit high predicted $\mathrm{IC}_{50}$ values, suggesting weak inhibition. However, alpelisib has a predicted $\mathrm{IC}_{50}$ of $30~\mathrm{nM}$ , suggestive of potent inhibition and relatively close to the experimental value of $5\mathrm{nM}$ suggested by Chen et al. [4] and Fritsch et al. [5]. TxGemma-27B-Predict also predicts that alpelisib is not mutagenic and would pass a phase 1 clinical trial for ovarian cancer. This iterative evaluation is also corroborated by existing evidence: alpelisib is approved for breast cancer [6] and has shown activity in ovarian cancer [7, 8, 9]. + +This workflow demonstrates a proof-of-concept for TxGemma's application in automating and optimizing therapeutic selection. 
We anticipate an agentic system capable of generating comprehensive lists of potential therapies and gene-disease associations paired with TxGemma would enable rapid prioritization and filtering, helping to reduce the candidate pool and accelerating the transition to preclinical studies. However, it's crucial to acknowledge the limitations of this demonstration. Clinical trial predictions are limited to Phase 1 success, and mutagenicity predictions do not encompass all aspects of small molecule toxicity. Future work should include experimental validation of TxGemma predictions and consideration of additional toxicity factors, such as hematologic toxicity, which were not included in our data. + +# TxGemma: Gene-Disease Association + +Q: Predict association from 0 to 1 between the following gene and malignant neoplasm of ovary. + +PIK3CA:MPPRPSSGELW + +A: PI3KCA has an association score of 0.7. 0.7 is 2.5σ above the mean score + +# TxGemma: Drug-Target Interaction + +Q: Given the following gene, predict how effectively the drug will inhibit it. + +PI3KCA E545K: MPPRSPSGELW... +Alpelisib: C1-Cc(SC(-N)NC...) + +A: Alpelisib has an IC50 of 30 nM. + +![](images/229596d5615d8db9e940ffb05b38f2d2da4d2620458f30f93387cd1de47e9957.jpg) +Figure S.3 | Proof-of-concept example of applying TxGemma to end-to-end therapeutic development. TxGemma is used to suggest a therapeutic for ovarian cancer by first identifying PIK3CA as an associated gene target from a list of possible genes. Then, from a list of candidate therapeutics, TxGemma predicts that alpelisib (a molecule previously unseen to TxGemma that has shown activity against ovarian cancer and is approved for breast cancer) would bind the E545K mutant of PIK3CA, that it would not be toxic/mutagenic, and that it would be approved in a clinical trial. Note that this example serves as a proof-of-concept demonstration and does not account for all aspects of efficacy, toxicity, or trial approval. 
Rigorous experimental validation of TxGemma predictions to completely new therapeutics is also a critical step to evaluating TxGemma and remains an area of future work. + +# TxGemma: Clinical Trial Approval + +Q: Predict whether the following drug will pass a phase I clinical trial against malignant neoplasm of ovary. + +# Alpelisib:CC1=C(SC(=N1)NC... + +A: Approved. + +# TxGemma: Toxicity Prediction + +Q: Predict whether the following drug is mutagenic. + +# Alpelisib:CC1=C(SC(=N1)NC... + +A: Not mutagenic. + +![](images/42c41199fcf8e1c1b61be220eadee67478dcac28dec9f92bb963ec83de23b969.jpg) + +![](images/0fc6f2350e51b7e6cd5a7bbc1317a452740c03d27b918c2dcd047504fcfadd35.jpg) + +![](images/d6c50205b6b2c913f7c73865098c30a77a468c56c5c6ec1feffbffeeab77100e.jpg) +Multi-instance tasks +Single-instance and generative tasks +Figure S.4 | Performance of TxGemma-27B-Predict compared to generalist and specialist SOTA models (top) The median relative change in performance of TxGemma-27B-Predict compared to Tx-LLM M. (middle) The median relative change in performance of TxGemma-27B-Predict compared to specialist SOTA models. (bottom) The median relative change in performance of TxGemma-27B-Predict compared to all SOTA models, including both Tx-LLM M and specialist models. Multi-instance tasks indicate tasks that involve multiple features, whereas single-instance tasks only involve one feature. The tasks within each task type are defined in Tables S.2 and S.3. + +![](images/d64008d7bba648430f646cee132bb06a7242ca81f3335858adf4a949d8bac876.jpg) +Figure S.5 | TxGemma performs comparably to LlaSMol on small molecule tasks. Accuracy is reported for binary classification tasks, and RMSE is reported for regression tasks. BBBP corresponds to BBB Martins in TDC tasks, ESOL corresponds to Solubility AqSolDB, and Lipo corresponds to Lipophilicity AstraZeneca. 
+ +![](images/9d4476e7572c6c23be88f9ba85d88ce520cdc474eb566aa40abb2c1e2e6cb7ea.jpg) + +![](images/e6c13c82f9aad77ffe02614d6eac77afddf8ff42b5e2ab8a220b12c5e64a976b.jpg) +Figure S.6 | TxGemma performs comparably to MolE on small molecule tasks. Comparison of MolE with TxGemma-27B-Predict on TDC tasks, separated by metric type (MAE, AUROC, Spearman correlation, and AUPRC). TxGemma-27B-Predict performs better than MolE on 10 out of 22 tasks. + +![](images/3485a458e0e83c0575b63d1e81dbaa9c3d70d585063afc9a1cb9ef52ff9240c6.jpg) + +![](images/09534e3b6d118249db6e3edff3bfb85233449f57161dcacc055ff510acfaca7c.jpg) + +![](images/c48b857842aeac69278aa4332949b757fe4bc93f85a7901073f2eedfb083c3c4.jpg) + +Table S.13 | Model performance on binary classification tasks. TxGemma-Predict and Gemma-2 performances compared with specialist SOTA for each binary classification task, along with the metric type. + +
Task NameMetricSpecialist SOTAGemma-2-2BGemma-2-9BGemma-2-27BTxGemma-2B-PredictTxGemma-9B-PredictTxGemma-27B-Predict
AMESAUROC0.871 [10]0.4870.6050.5080.7960.7980.816
BBB MartinsAUROC0.915 [11]0.2500.6450.5460.8640.8740.907
Bioavailability MaAUROC0.748 [12]0.4790.5840.5790.7150.6550.696
CYP1A2 VeithAUPRC0.900 [13]0.3880.5330.5620.9100.9160.922
CYP2C19 VeithAUROC0.890 [13]0.4560.5950.6190.9050.9060.899
CYP2C9 Substrate CarbonMangelsAUPRC0.441 [10]0.2930.3360.3670.4570.4680.427
CYP2C9 VeithAUPRC0.839 [14]0.2830.3740.4170.8010.7990.798
CYP2D6 Substrate CarbonMangelsAUPRC0.736 [14]0.2330.3290.3860.6050.6030.706
CYP2D6 VeithAUPRC0.739 [14]0.1450.1660.1850.6370.6640.681
CYP3A4 Substrate CarbonMangelsAUROC0.662 [15]0.5140.5850.5960.6690.6220.690
CYP3A4 VeithAUPRC0.904 [14]0.4270.5310.5350.8440.8390.854
Carcinogens LaguninAccuracy0.770 [16]0.2500.2860.3390.8210.8390.857
ClinToxAUROC0.948 [17]0.4370.4820.4240.8100.8310.888
DILIAUROC0.925 [10]0.3200.6510.6270.8750.8480.887
HIA HouAUROC0.988 [18]0.2570.9320.7830.9370.9670.988
HIVAUROC0.851 [19]0.4910.4950.5370.7370.7340.764
HuRIAUPRC0.724 [20]0.4960.4840.5260.7510.7790.799
MHC1 IEDB IMGT NielsenAUROC0.986 [21]0.4980.5040.5170.9100.9270.929
MHC2 IEDB JensenAUROC0.940 [22]0.4980.5260.5440.8120.8500.851
PAMPA NCATSAUROC0.900 [23]0.4650.5830.5440.6420.6710.705
Pgp BroccatelliAUROC0.935 [10]0.4160.6700.4970.9000.9110.936
SARSCOV2 3CLPro DiamondAUROC0.800 [24]0.3010.3880.4770.7330.7080.769
SARSCoV2 Vitro TouretAUROC0.640 [25]0.5680.6110.4790.6500.6680.598
SAbDab ChenAUPRC0.510 [26]0.5320.6960.7010.6760.8070.767
Skin ReactionAUROC0.840 [27]0.4290.5460.4930.6710.6480.708
Tox21AUROC0.961 [28]0.3580.4360.4970.8810.8960.893
ToxCastAUROC0.777 [17]0.4850.5120.5580.7840.7670.800
butkiewiczAUROC0.840 [29]0.4570.4910.4910.7910.7720.831
hERGAUROC0.874 [12]0.5380.6390.5000.8760.8810.884
hERG KarimAccuracy0.770 [30]0.5290.5320.5220.7780.7940.774
herg centralAUROC0.860 [31]0.4810.5110.5170.8800.8610.896
miRTarBaseAccuracy0.804 [32]0.4980.5010.4980.8050.8290.801
phase1AUROC0.576 [33]0.5620.5620.5530.6420.6350.622
phase2AUROC0.645 [33]0.5430.5710.5310.6650.6680.676
phase3AUROC0.723 [33]0.5590.5670.5590.7310.7290.739
weberAUROC0.870 [34]0.4660.5860.4690.7300.7270.749
+ +Table S.14 | Model performance on regression and generation tasks. TxGemma-Predict and Gemma-2 performances compared with specialist SOTA for each regression and generation task, along with the metric type. Tasks for which we did not find a specialist SOTA value are indicated with N/A. + +
Task NameMetricSpecialist SOTAGemma-2-2BGemma-2-9BGemma-2-27BTxGemma-2B-PredictTxGemma-9B-PredictTxGemma-27B-Predict
BindingDB PatentPCC0.588 [35]-0.066-0.0390.0300.4220.5240.538
BindingDB ic50Spearman0.637 [36]0.0010.0020.0440.3990.3980.445
BindingDB kdPCC0.712 [37]0.197-0.0090.1190.3520.3700.456
BindingDB kiPCC0.840 [38]-0.018-0.053-0.0270.6610.7370.676
Buchwald HartwigPCC0.786 [39]0.5280.6360.6840.8610.9150.910
Caco2 WangMAE0.285 [18]1.0570.5330.6180.4760.3730.401
Clearance Hepatocyte AZSpearman0.440 [40]0.1410.1630.2140.3530.3380.259
Clearance Microsome AZSpearman0.625 [18]0.2390.3250.2940.4680.6230.462
DAVISMSE0.219 [41]2.7059.0544.4730.6010.5870.555
DisGeNETMAEN/A0.2940.2950.2770.0570.0540.054
DrugComb BlissMAE4.560 [42]8.2137.4136.4564.2304.3374.156
DrugComb CSSMAE16.858 [42]36.84733.83722.61415.75216.48015.000
DrugComb HSAMAE4.453 [42]7.4587.3656.6704.2314.3354.209
DrugComb LoeweMAE9.184 [42]13.87313.36914.73117.34218.66517.336
DrugComb ZIPMAE4.027 [42]8.5886.2265.4043.9503.9043.807
GDSC1PCC0.860 [43]-0.0410.0730.0930.8760.5450.892
GDSC2PCC0.860 [43]-0.043-0.0370.0860.8240.5390.912
Half Life ObachSpearman0.547 [44]0.2880.2840.4850.3860.4940.458
KIBAMSE0.154 [41]2.8871.9252.0160.5880.5480.633
LD50 ZhuMAE0.552 [18]1.9710.8960.8740.7100.6300.628
LeenaySpearman0.740 [45]0.0850.0910.1460.0970.0670.276
Lipophilicity AstraZenecaMAE0.467 [46]1.5061.2071.0320.6100.5650.539
OncoPolyPharmacologyPCC0.730 [47]-0.0400.0640.0720.4730.5180.540
PPBR AZMAE7.788 [46]10.8369.7689.8799.2668.8899.029
Protein SAbDabMAEN/A1.2801.1701.1631.0661.1061.210
Solubility AqSolDBMAE0.761 [46]4.2142.5493.0960.9610.8680.821
TAPMAEN/A5.0084.2413.9585.3014.4734.280
USPTOAccuracy0.415 [48]0.0000.0010.0000.2870.0970.084
USPTO YieldsPCC0.361 [39]-0.0150.0260.0640.0110.0310.395
VDss LombardoSpearman0.627 [49]0.1000.4130.3540.5640.6070.560
+ +Table S.15 | Model performance on binary classification tasks. TxGemma-Predict, TxGemma-Chat, and Tx-LLM performances for each binary classification task, along with the metric type. + +
Task NameMetricTxGemma-9B-PredictTxGemma-27B-PredictTxGemma-9B-ChatTxGemma-27B-ChatTx-LLM STx-LLM M
AMESAUROC0.7980.8160.7210.7330.7850.786
BBB MartinsAUROC0.8740.9070.8110.8610.8050.882
Bioavailability MaAUROC0.6550.6960.6200.6590.6050.702
CYP1A2 VeithAUPRC0.9160.9220.8390.8230.9060.914
CYP2C19 VeithAUROC0.9060.8990.8370.8280.8770.895
CYP2C9 Substrate CarbonMangelsAUPRC0.4680.4270.3820.4270.4030.436
CYP2C9 VeithAUPRC0.7990.7980.6670.6820.7500.788
CYP2D6 Substrate CarbonMangelsAUPRC0.6030.7060.5490.7000.6430.600
CYP2D6 VeithAUPRC0.6640.6810.5040.4350.6050.659
CYP3A4 Substrate CarbonMangelsAUROC0.6220.6900.6420.6660.6370.647
CYP3A4 VeithAUPRC0.8390.8540.7490.7500.8000.840
Carcinogens LaguninAccuracy0.8390.8570.8930.9110.8570.786
ClinToxAUROC0.8310.8880.7110.6370.8180.863
DILIAUROC0.8480.8870.6880.7660.7270.882
HIA HouAUROC0.9670.9880.8720.8970.9420.990
HIV*AUROC0.7340.7640.6120.5820.6860.732
HuRIAUPRC0.7790.7990.6280.6210.7050.753
MHC1 IEDB IMGT NielsenAUROC0.9270.9290.8750.8250.9130.907
MHC2 IEDB JensenAUROC0.8500.8510.7240.6830.7810.863
PAMPA NCATSAUROC0.6710.7050.7350.6640.6460.668
Pgp BroccatelliAUROC0.9110.9360.8990.9120.9090.939
SARSCOV2 3CLPro DiamondAUROC0.7080.7690.6990.7220.7550.712
SARSCoV2 Vitro TouretAUROC0.6680.5980.5030.5060.5120.601
SAbDab ChenAUPRC0.8070.7670.7020.7190.3900.473
Skin ReactionAUROC0.6480.7080.6380.5430.5640.615
Tox21AUROC0.8960.8930.8070.7970.8580.882
ToxCastAUROC0.7670.8000.7540.7340.7790.792
butkiewiczAUROC0.7720.8310.6290.6190.5740.566
hERGAUROC0.8810.8840.8300.8320.8790.909
hERG KarimAccuracy0.7940.7740.6570.6680.7240.745
herg centralAUROC0.8610.8960.8300.8070.8800.888
miRTarBaseAccuracy0.8290.8010.6790.6440.7650.799
phase1AUROC0.6350.6220.5760.5570.6240.667
phase2AUROC0.6680.6760.6380.6260.6390.676
phase3AUROC0.7290.7390.6830.6680.7010.728
weberAUROC0.7270.7490.6720.6430.7380.743
+ +* To predict whether compounds have Anti-HIV properties. + +Table S.16 | Model performance on regression and generation tasks. TxGemma-Predict, TxGemma-Chat, and Tx-LLM performances for each regression and generation task, along with the metric type. + +
Task NameMetricTxGemma-9B-PredictTxGemma-27B-PredictTxGemma-9B-ChatTxGemma-27B-ChatTx-LLM STx-LLM M
BindingDB PatentPCC0.5240.5380.4520.2200.4740.531
BindingDB ic50Spearman0.3980.4450.4120.3620.3260.311
BindingDB kdPCC0.3700.4560.1620.1590.3170.391
BindingDB kiPCC0.7370.6760.4480.2110.5650.726
Buchwald HartwigPCC0.9150.9100.2550.7570.6820.905
Caco2 WangMAE0.3730.4010.6430.3980.6210.432
Clearance Hepatocyte AZSpearman0.3380.2590.1970.1500.2560.385
Clearance Microsome AZSpearman0.6230.4620.3450.4200.3850.413
DAVISMSE0.5870.5550.6080.5610.5640.704
DisGeNETMAE0.0540.0540.0660.0640.0590.057
DrugComb BlissMAE4.3374.1564.5024.5114.4254.104
DrugComb CSSMAE16.48015.00016.38416.90014.74014.057
DrugComb HSAMAE4.3354.2094.4974.5204.3114.118
DrugComb LoeweMAE18.66517.33616.99416.91417.42817.381
DrugComb ZIPMAE3.9043.8074.1394.1414.0473.777
GDSC1PCC0.5450.8920.8610.8020.8760.887
GDSC2PCC0.5390.9120.8640.8230.8960.900
Half Life ObachSpearman0.4940.4580.3300.4140.5250.448
KIBAMSE0.5480.6330.7050.8520.7090.548
LD50 ZhuMAE0.6300.6280.7400.7050.8080.618
LeenaySpearman0.0670.2760.1280.0950.0480.083
Lipophilicity AstraZenecaMAE0.5650.5390.9850.8420.7790.587
OncoPolyPharmacologyPCC0.5180.5400.3590.1930.4180.552
PPBR AZMAE8.8899.02911.36710.89511.1389.108
Protein SAbDabMAE1.1061.2101.2681.1161.4321.268
Solubility AqSolDBMAE0.8680.8211.1591.1330.9310.987
TAPMAE4.4734.2804.8594.0835.0754.983
USPTOAccuracy0.0970.0840.0860.0910.2200.239
USPTO YieldsPCC0.0310.3950.0030.0260.0420.070
VDss LombardoSpearman0.6070.5600.3960.4070.4970.609
+ +Table S.17 | Model performance on binary classification tasks for models trained only on datasets with commercial licenses. TxGemma-Predict and TxGemma-Chat performances for each binary classification task, along with the metric type. + +
Task NameMetricTxGemma-2B-PredictTxGemma-9B-PredictTxGemma-27B-PredictTxGemma-9B-ChatTxGemma-27B-Chat
AMESAUROC0.8120.8030.8260.7230.729
BBB MartinsAUROC0.8830.8490.8990.8320.848
Bioavailability MaAUROC0.6880.6880.7240.6660.625
CYP1A2 VeithAUPRC0.9110.9140.9160.8620.817
CYP2C19 VeithAUROC0.9050.8970.8970.8440.823
CYP2C9 Substrate CarbonMangelsAUPRC0.4170.3900.4600.4140.375
CYP2C9 VeithAUPRC0.7870.8000.7930.7000.685
CYP2D6 Substrate CarbonMangelsAUPRC0.6260.6970.7060.6530.704
CYP2D6 VeithAUPRC0.6660.6620.6770.5170.422
CYP3A4 Substrate CarbonMangelsAUROC0.6380.6800.6920.6440.653
CYP3A4 VeithAUPRC0.8420.8390.8520.7600.747
Carcinogens LaguninAccuracy0.9110.8570.8750.8930.929
ClinToxAUROC0.9170.8150.8840.7160.595
DILIAUROC0.8290.8230.9270.6750.797
HIA HouAUROC0.9840.9540.9900.9060.927
HIVAUROC0.7810.7300.7680.6410.589
HuRIAUPRC0.7350.7670.7970.6850.620
MHC1 IEDB IMGT NielsenAUROC0.9300.9290.9330.8870.826
MHC2 IEDB JensenAUROC0.8550.8520.8550.7330.682
PAMPA NCATSAUROC0.6940.6300.7240.6840.659
Pgp BroccatelliAUROC0.9220.9320.9410.8730.920
SARSCOV2 3CLPro DiamondAUROC0.7480.7990.6760.7160.712
SARSCoV2 Vitro TouretAUROC0.6590.6220.5970.5270.516
SAbDab ChenAUPRC0.7260.7450.7930.5230.731
Skin ReactionAUROC0.6910.6240.7330.6210.571
Tox21AUROC0.8970.8930.8900.8180.797
ToxCastAUROC0.7870.7660.7970.7540.735
butkiewiczAUROC0.8110.7750.8260.6810.606
hERGAUROC0.9020.8900.8940.8550.829
hERG KarimAccuracy0.7780.7960.7720.6490.673
herg centralAUROC0.8900.8600.8920.8420.805
miRTarBaseAccuracy0.8180.8340.8020.6720.649
weberAUROC0.7500.6970.7490.6920.645
+ +* To predict whether compounds have Anti-HIV properties. + +Table S.18 | Model performance on regression and generation tasks for models trained only on datasets with commercial licenses. TxGemma-Predict and TxGemma-Chat performances for each regression or generation task, along with the metric type. + +
Task NameMetricTxGemma-2B-PredictTxGemma-9B-PredictTxGemma-27B-PredictTxGemma-9B-ChatTxGemma-27B-Chat
BindingDB PatentPCC0.5560.3760.5370.4380.118
BindingDB ic50Spearman0.4250.3130.4650.4430.361
BindingDB kdPCC0.4900.3930.2890.2070.156
BindingDB kiPCC0.7280.7120.6700.3870.218
Buchwald HartwigPCC0.9200.9180.9030.5740.818
Caco2 WangMAE0.6190.4910.4790.5880.383
Clearance Hepatocyte AZSpearman0.2920.3780.3500.1660.190
Clearance Microsome AZSpearman0.5210.5240.5100.3940.395
DAVISMSE0.5760.5640.5750.5610.561
DrugComb BlissMAE4.0884.2864.1574.4544.519
DrugComb CSSMAE14.56815.37014.92515.96016.649
DrugComb HSAMAE4.0634.2824.1784.4864.529
DrugComb LoeweMAE17.31317.86217.32717.19016.873
DrugComb ZIPMAE3.7373.8483.8234.0934.132
Half Life ObachSpearman0.4230.3480.4910.2690.393
KIBAMSE0.5620.5250.5540.8300.858
LD50 ZhuMAE0.6980.7180.6770.7240.721
LeenaySpearman0.1140.0890.2590.0780.183
Lipophilicity AstraZenecaMAE0.5710.6670.6130.8340.837
OncoPolyPharmacologyPCC0.5560.4370.5310.3880.148
PPBR AZMAE8.8139.1778.79211.00411.025
Protein SAbDabMAE1.1171.0221.0721.3481.173
Solubility AqSolDBMAE0.9111.1850.8021.1601.135
TAPMAE5.4984.8394.0884.6114.444
USPTOAccuracy0.3160.0410.2810.1450.090
USPTO YieldsPCC0.4710.0020.3500.1140.002
VDss LombardoSpearman0.5940.5380.5910.4100.487
+ +![](images/4060b21bb67dbad81e197effeba434dd94828fd7e278fae975f4ae41d84efdb2.jpg) +Figure S.7 | Performance of TxGemma and Gemma-2 models on MMLU. Score of TxGemma-27B-Chat, TxGemma-27B-Predict, and Gemma-2-27B on each subject of the MMLU benchmark [50], a comprehensive suite of 57 diverse tasks spanning mathematics, history, computer science, law, etc. This benchmark evaluates knowledge, reasoning, and problem-solving abilities across a wide range of academic subjects, providing a robust measure of overall language understanding. TxGemma-27B-Chat (73.87%) performs slightly below Gemma-2-27B (75.38%) but significantly better than TxGemma-27B-Predict (53.60%), suggesting that domain fine-tuning purely on therapeutic data can cause loss of information learned during base model training, but simultaneously training with general instruction-tuning data can compensate for this. + +Tell me about the blood-brain barrier + +# TxGemma + +The blood-brain barrier (BBB) is a highly selective semipermeable border that separates the circulating blood from the brain and extracellular fluid in the central nervous system (CNS). It plays a crucial role in maintaining a stable environment for the brain, protecting it from harmful substances and pathogens while allowing the passage of essential nutrients. + +Here are some key features of the BBB: + +Cellular composition: The BBB is primarily formed by endothelial cells lining the capillaries in the brain. These cells are tightly connected by specialized tight junctions that restrict the movement of molecules between them. + +Astrocytes and pericytes: These cells provide structural and functional support to the endothelial cells, contributing to the regulation of BBB permeability. + +Selective transport mechanisms: The BBB utilizes various transport systems to regulate the passage of molecules across its barrier. These include: + +Passive diffusion: Small lipophilic molecules can readily cross the BBB. 
+ +Facilitated diffusion: Carrier-mediated transport systems assist the movement of specific molecules, such as glucose and amino acids. + +Active transport: Energy-dependent mechanisms pump molecules against their concentration gradient, such as the removal of toxins from the brain. + +P-glycoprotein (P-gp): This efflux pump actively removes a wide range of drugs from the brain, including many central nervous system-acting medications. + +*Impairment of the BBB can lead to various neurological disorders, including stroke, multiple sclerosis, and Alzheimer's disease. It can also allow the entry of harmful substances into the brain, such as pathogens and toxins. + +*Researchers are actively developing strategies to overcome the BBB for drug delivery to the brain, including using nanocarriers and focused ultrasound. + +Figure S.8 | Models can converse about general topics after fine-tuning only on the processed TDC data. Example of a dialogue with TxGemma-27B-Predict. When asked a question that is not in a processed TDC data format, the model is able to respond coherently. + +# Example of TxGemma-27B-Predict Failure in Conversation + +# Prompt + +Instructions: Answer the following question about drug properties. + +Context: As a membrane separating circulating blood and brain extracellular fluid, the blood-brain barrier (BBB) is the protection layer that blocks most foreign drugs. Thus the ability of a drug to penetrate the barrier to deliver to the site of action forms a crucial challenge in development of drugs for central nervous system. + +Question: Given a drug SMILES string, predict whether it + +(A) does not cross the BBB (B) crosses the BBB + +Drug SMILES: C1CN(CCC1(C=O)NC(CCO)C2=CC=C(C=C2)Cl)N)C3=NC=NC4=C3C=CN4 + +Answer: + +# Tx-Gemma-Predict Response + +Answer: (B) + +# Prompt + +What properties are useful for crossing the blood-brain barrier? 
Don't answer with (A) or (B) + +# Tx-Gemma-Predict Response + +Answer: Small molecular size, high lipophilicity, lack of ionization, and absence of extensive protein binding are generally favorable for BBB penetration. + +# Prompt + +Does the drug I gave you have these properties? Don't answer with (A) or (B) + +# Tx-Gemma-Predict Response + +Answer: (B) + +Figure S.9 | Predictive model struggles to hold a conversation about processed TDC data prompts after fine-tuning only on this data. Example of a multi-turn dialogue with TxGemma-27B-Predict. The initial turn is a question in the processed TDC data format. After the initial turn, the model is prompted to describe some properties associated with the problem of crossing the blood-brain barrier. An additional instruction to avoid answering with (A) or (B) is necessary. The model is able to provide some answers in natural language, but another turn leads to an inappropriate multiple choice answer. + +Provide five steps of reasoning to think through the problem and then provide an answer. Let's think step by step. Consider whether the drug would pass phase 1 trials, not whether the drug would be fully approved. You must also discuss the specific drug and disease as part of your reasoning that leads to your answer. This should be emphasized. + +Instructions: Answer the following question about clinical trials. + +Context: Clinical trial is the most time and cost-consuming step in the drug discovery process. Phase 1 clinical trials test the safety and basic properties of a new drug or treatment in a small group of people for the first time. Optimizing and designing trials with machine learning could drastically lead to the speedup of delivery of life-saving therapeutics to patients. Clinical trial outcome prediction is a machine learning task that aims to forecast the outcome of clinical trials, such as the approval rate of a drug or treatment. 
It utilizes various clinical trial features, including the drug's molecular structure and patient disease. + +Question: Given a drug SMILES string and disease, predict if the phase 1 trial (A) would not be approved (B) would be approved + +Drug SMILES: CC1=C(SC(=N1)NC(=O)N2CCCCC2(=O)N)C3=CC(=NC=C3)C(C)(C)(C(F)(F)F + +Disease: Malignant neoplasm of ovary: Ovarian cancer is a disease that affects women. In this form of cancer, certain cells in the ovary become abnormal and multiply uncontrollably to form a tumor. The ovaries are the female reproductive organs in which egg cells are produced. In about 90 percent of cases, ... + +Remember: (A) would not be approved (B) would be approved + +Reasoning: + +# TxGemma + +Here's a step-by-step reasoning process to predict the outcome of the Phase 1 trial: + +# 1. Analyze the Drug's Chemical Structure: + +Observation: The SMILES string indicates a complex molecule with multiple aromatic rings, fluorine atoms, and amide groups. + +Reasoning: Such structures often point to potential biological activity, but also raise concerns about potential toxicity and side effects. + +# 2. Consider the Disease: + +Observation: Ovarian cancer is a serious and often aggressive disease with limited treatment options. + +Reasoning: The unmet medical need is high, meaning there's a greater chance for a new drug to be approved if it shows any promise of efficacy. + +# 3. Evaluate Potential Toxicity: + +Observation: We don't have specific information about the drug's toxicity profile from the given data. + +Reasoning: Phase 1 trials primarily focus on safety. The drug's chemical structure suggests potential for toxicity, which would need to be carefully assessed in the trial. + +# 4. Assess the Likelihood of Demonstrating Safety: + +Observation: While the drug's structure raises concerns, there's no information suggesting inherent, severe toxicity that would preclude Phase 1 testing. 
+ +Reasoning: Most drugs with novel structures undergo preclinical testing before reaching Phase 1. If the drug passed those tests, it likely has a reasonable chance of demonstrating acceptable safety in a small group of patients. + +# 5. Weighing the Factors: + +Observation: We have a potentially effective drug for a serious disease, but with potential toxicity concerns. + +Reasoning: Phase 1 trials are designed to assess safety and dosage. Even if the drug has some side effects, it's likely to be approved to proceed to further testing if it shows any signs of efficacy and the benefits outweigh the risks in the context of a serious disease. + +Answer: (B) would be approved + +Figure S.10 | Enabling reasoning with TxGemma-Chat on challenging tasks. Example of prompting TxGemma-27B-Chat to explain its prediction for predicting clinical trial approval, a relatively unintuitive task. A prefix is added before the prompt to provide instructions for reasoning, and a reminder is added at the end so the model correctly specifies the option corresponding to its desired answer. Lastly, the "Answer" text is changed to "Reasoning:" to enable reasoning steps. The reasoning provided by the model is not comprehensive but can provide useful insights into the drug-disease interaction. + +![](images/50aa41bd72408b2d1964da82f8d4a59c63c3870b25b0effe6eb0301bce2fba97.jpg) +Figure S.11 | Inference speed of TxGemma models. The number of examples inferred per day at different model sizes, normalized by the number of TPUv5e chips used for serving. The PPBR AZ task was used for the benchmarking due to its reasonable size. + +![](images/b821a33a107593bfb04127fe8375bcedb99541d1716ad89c30f99bc5c74a7f45.jpg) + +![](images/595c78f89cc375092a21ef0c686aac0157bd0fd7a3be1241a02d3da171359946.jpg) +Figure S.12 | Contamination analysis. (top) Out of 66 tasks, $23\%$ had some datapoints in the test set that were found in the Gemma-2 pretraining data, while $77\%$ did not. 
For tasks that had some contaminated datapoints, we plot the percent of the test set that was contaminated. (bottom) Distributions of cosine similarities between SMILES string embeddings and molecular name embeddings. Decoy name embeddings indicate a random different molecule name. + +![](images/a06c6ddce36e0a8cc6fde75d0a6734d0239ad730b94f9224094944eb29afbd53.jpg) +Figure S.13 | Model performance after filtering contaminated datapoints. Performance of TxGemma-27B-Predict on both original unfiltered test sets and filtered test sets in which contaminated datapoints were removed. (left) For these tasks, higher values correspond to better models, and the metrics are defined in Tables S.13 and S.14. Error bars are bootstrapped standard errors. (right) For these tasks, lower values correspond to better models, and the metrics (either MAE or MSE) are defined in Tables S.13 and S.14. Error bars are bootstrapped standard errors. + +![](images/5a77174bc677da9772e91115ac640fda919da96c5104c827eee2013682a15b8a.jpg) + +![](images/8831205666fac2be3162fb8e88173d029338f92c0c4193c627a45407203b69d9.jpg) +Figure S.14 | Breakdown of tool-usage frequency for Chemical Preference dataset and HLE dataset. Agentic-Tx adapts its tool usage to reason effectively about different tasks. For Chemical Preference, which requires evaluating drug candidates, the system correctly invokes tools for molecular characterization and safety assessment, such as SMILES description and toxicity prediction. For the Bio+Med task, focused on complex biomedical questions, the agent prioritizes PubMed and Wikipedia, demonstrating reliance on broad knowledge retrieval and synthesis. + +![](images/bb7daa0ab3745fb49912d1219615736ceb002d7d69fafd8c7a73819734f71a09.jpg) + +![](images/0dbc4689ed18c4fc12b7ae53b6420d0f6eb26f24706c74f033eb5e0918cf8339.jpg) +Figure S.15 | Breakdown of tool-usage per question in chemical preference dataset. 
Marker size represents usage count and corresponds to the number of uses per each tool; blue indicates accuracy increase, light red indicates decrease associated with each tool per question. We observe questions involve up to 8 tool calls. High usage of SMILES description and toxicity prediction correlates with improved performance. This demonstrates Agentic-Tx's adaptive tool selection to meet task requirements and improved performance. + +![](images/a9c5a0cec4eba54bfa2c62a6203b620f76c3bf7a0500cf31babec23662e84c49.jpg) +Figure S.16 | Ability to combine SMILES and text is independent of model size. Median relative change of TxGemma-27B-Predict, TxGemma-9B-Predict and TxGemma-2B-Predict performance from SOTA for tasks grouped by feature type. The signs were reversed for MAE and MSE metrics because lower MAE and MSE values correspond to better performances. The number of tasks in each feature type is displayed over each bar. In all models, over $90\%$ of tasks had a median relative performance change greater than -0.2, and SMILES + Text consistently outperformed SOTA. + +![](images/98955b38edb8949ab00f726cf43f2ccf404666fb5bdd25f71cd16900b64edab5.jpg) + +![](images/abcd5f2594648ea69cc611d92f1720d998830411dd4e40aca9dce191977179fb.jpg) + +![](images/53ef111b2c7457fddcdcb839e97bb4a0565136bb40ee6962ec4d103f9efc3a79.jpg) +Figure S.17 | Ablations of model sizes and model adaptations. (left) Relative performance changes for pairwise comparisons of TxGemma-Predict models (TxGemma-2B-Predict, TxGemma-9B-Predict, TxGemma-27B-Predict). (right) Relative performance changes of TxGemma models compared to their respective base models. + +![](images/97a2ed49d9a6a34eb3566d17f60c2015ee4e18897569ccfbdda77a9076bdca00.jpg) + +![](images/5440136090b3e3dd0547d3116cd363c58ded13e0ceb7ef554d37f45cd179852c.jpg) +Figure S.18 | TxGemma predictions show correlations between toxicity and clinical trial approval. 
Spearman correlation coefficients between toxicity predictions (measured by AMES, DILI, and hERG central) and clinical trial predictions (measured by Phase1, Phase2, and Phase3) on a set of PubChem molecules. + +# References + +1. Chen, J., Hu, Y., Wang, Y., Lu, Y., Cao, X., Lin, M., Xu, H., Wu, J., Xiao, C., Sun, J., et al. TrialBench: Multi-modal artificial intelligence-ready clinical trial datasets. arXiv preprint arXiv:2407.00631 (2024). +2. Kuo, K.-T., Mao, T.-L., Jones, S., Veras, E., Ayhan, A., Wang, T.-L., Glas, R., Slamon, D., Velculescu, V. E., Kuman, R. J., et al. Frequent activating mutations of PIK3CA in ovarian clear cell carcinoma. The American journal of pathology 174, 1597-1601 (2009). +3. Leontiadou, H., Galdadas, I., Athanasiou, C. & Cournia, Z. Insights into the mechanism of the PIK3CA E545K activating mutation using MD simulations. Scientific reports 8, 15544 (2018). +4. Chen, H., Si, Y., Wen, J., Hu, C., Xia, E., Wang, Y. & Wang, O. P110α inhibitor alpelisib exhibits a synergistic effect with pyrotinib and reverses pyrotinib resistant in HER2+ breast cancer. Neoplasia 43, 100913 (2023). +5. Fritsch, C., Huang, A., Chatenay-Rivauday, C., Schnell, C., Reddy, A., Liu, M., Kauffmann, A., Guthy, D., Erdmann, D., De Pover, A., et al. Characterization of the novel and specific PI3Kα inhibitor NVP-BYL719 and development of the patient stratification strategy for clinical trials. Molecular cancer therapeutics 13, 1117-1129 (2014). +6. Narayan, P., Prowell, T. M., Gao, J. J., Fernandes, L. L., Li, E., Jiang, X., Qiu, J., Fan, J., Song, P., Yu, J., et al. FDA approval summary: alpelisib plus fulvestrant for patients with HR-positive, HER2-negative, PIK3CA-mutated, advanced or metastatic breast cancer. Clinical Cancer Research 27, 1842-1849 (2021). +7. Passarelli, A., Carbone, V., Pignata, S., Mazzeo, R., Lorusso, D., Scambia, G., Canova, S., Di Palma, T., Tasca, G., Mantiero, M., et al. 
Alpelisib for PIK3CA-mutated advanced gynecological cancers: first clues of clinical activity. *Gynecologic Oncology* 183, 61-67 (2024). +8. Thibault, B., Thole, A., D'Angelo, R., Basset, C. & Guillermet-Guibert, J. PI3Kα-specific inhibitor BYL-719 synergizes with cisplatin in vitro in PIK3CA-mutated ovarian cancer cells. Scientific Reports 15, 6265 (2025). +9. Hu, X., Xia, M., Wang, J., Yu, H., Chai, J., Zhang, Z., Sun, Y., Su, J. & Sun, L. Dual PI3K/mTOR inhibitor PKI-402 suppresses the growth of ovarian cancer cells by degradation of Mcl-1 through autophagy. Biomedicine & Pharmacotherapy 129, 110397 (2020). +10. Turon, G., Hlozek, J., Woodland, J. G., Kumar, A., Chibale, K. & Duran-Frigola, M. First fully-automated AI/ML virtual screening cascade implemented at a drug discovery centre in Africa. Nature Communications 14, 5736 (2023). +11. Fontenot, R., Kathad, U., McDermott, J., Sturtevant, D., Sharma, P. & Carr, P. Predicting a Compounds Blood-Brain-Barrier Permeability with Lantern Pharma's AI and ML Platform, RADR 2023. +12. Bera, S., Dent, J., Gill, G., Stolman, A. & Wu, B. SimGCN for TDC Benchmarks (2022). +13. Plonka, W., Stork, C., Šićho, M. & Kirchmair, J. CYPlebrity: Machine learning models for the prediction of inhibitors of cytochrome P450 enzymes. Bioorganic & medicinal chemistry 46, 116388 (2021). +14. Hu, W., Liu, B., Gomes, J., Zitnik, M., Liang, P., Pande, V. & Leskovec, J. Strategies for pre-training graph neural networks. arXiv preprint arXiv:1905.12265 (2019). +15. Huang, K., Fu, T., Glass, L. M., Zitnik, M., Xiao, C. & Sun, J. DeepPurpose: a deep learning library for drug-target interaction prediction. Bioinformatics 36, 5545-5547 (2020). +16. Lagunin, A., Filimonov, D., Zakharov, A., Xie, W., Huang, Y., Zhu, F., Shen, T., Yao, J. & Poroikov, V. Computer-aided prediction of rodent carcinogenicity by PASS and CISOC-PSCT. QSAR & Combinatorial Science 28, 806-810 (2009). +17. Li, P., Li, Y., Hsieh, C.-Y., Zhang, S., Liu, X., Liu, H., Song, S. 
& Yao, X. TrimNet: learning molecular representation from triplet messages for biomedicine. Briefings in Bioinformatics 22, bbaa266 (2021). +18. Huang, D., Chowdhuri, S. R., Li, A., Li, A., Agrawal, A., Gano, K. & Zhu, A. A Unified System for Molecular Property Predictions: Oloren ChemEngine and its Applications (2022). +19. Li, J., Cai, D. & He, X. Learning graph-level representation for drug discovery. arXiv preprint arXiv:1709.03741 (2017). +20. Raimondi, D., Simm, J., Arany, A. & Moreau, Y. A novel method for data fusion over entity-relation graphs and its application to protein-protein interaction prediction. Bioinformatics 37, 2275-2281 (2021). +21. Gfeller, D., Schmidt, J., Croce, G., Guillaume, P., Bobisse, S., Genolet, R., Queiroz, L., Cesbron, J., Racle, J. & Harari, A. Improved predictions of antigen presentation and TCR recognition with MixMHCpred2. 2 and PRIME2. 0 reveal potent SARS-CoV-2 CD8+ T-cell epitopes. Cell Systems 14, 72-83 (2023). +22. Motmaen, A., Dauparas, J., Baek, M., Abedi, M. H., Baker, D. & Bradley, P. Peptide-binding specificity prediction using fine-tuned protein structure prediction networks. Proceedings of the National Academy of Sciences 120, e2216697120 (2023). +23. Siramshetty, V., Williams, J., Nguyen, D., Neyra, J., Southall, N., Mathé, E., Xu, X. & Shah, P. Validating ADME QSAR models using marketed drugs. SLAS DISCOVERY: Advancing the Science of Drug Discovery 26, 1326-1336 (2021). +24. Haneczok, J. & Delijewski, M. Machine learning enabled identification of potential SARS-CoV-2 3CLpro inhibitors based on fixed molecular fingerprints and Graph-CNN neural representations. Journal of Biomedical Informatics 119, 103821 (2021). +25. Liu, Y., Wu, Y., Shen, X. & Xie, L. COVID-19 multi-targeted drug repurposing using few-shot learning. Frontiers in Bioinformatics 1, 693177 (2021). +26. Chen, X., Dougherty, T., Hong, C., Schibler, R., Zhao, Y. C., Sadeghi, R., Matasci, N., Wu, Y.-C. & Kerman, I. 
Predicting antibody developability from sequence using machine learning. *biorxiv*, 2020-06 (2020). +27. Alves, V. M., Muratov, E., Fourches, D., Strickland, J., Kleinstreuer, N., Andrade, C. H. & Tropsha, A. Predicting chemically-induced skin reactions. Part I: QSAR models of skin sensitization and their application to identify potentially hazardous compounds. Toxicology and applied pharmacology 284, 262-272 (2015). +28. Shermukhamedov, S., Mamurjonova, D. & Probst, M. Structure to Property: Chemical Element Embeddings and a Deep Learning Approach for Accurate Prediction of Chemical Properties. arXiv preprint arXiv:2309.09355 (2023). + +29. Vu, O., Mendenhall, J., Altarawy, D. & Meiler, J. BCL.: Mol2D—a robust atom environment descriptor for QSAR modeling and lead optimization. Journal of computer-aided molecular design 33, 477–486 (2019). +30. Karim, A., Lee, M., Balle, T. & Sattar, A. CardioTox net: a robust predictor for hERG channel blockade based on deep learning meta-feature ensembles. Journal of Cheminformatics 13, 1-13 (2021). +31. Korotcov, A., Tkachenko, V., Russo, D. P. & Ekins, S. Comparison of deep learning with multiple machine learning methods and metrics using diverse drug discovery data sets. Molecular pharmaceutics 14, 4462-4475 (2017). +32. Wong, L., You, Z.-H., Guo, Z.-H., Yi, H.-C., Chen, Z.-H. & Cao, M.-Y. MIPDH: a novel computational model for predicting microRNA-mRNA interactions by DeepWalk on a heterogeneous network. ACS omega 5, 17022-17032 (2020). +33. Fu, T., Huang, K., Xiao, C., Glass, L. M. & Sun, J. Hint: Hierarchical interaction network for clinical-trial-outcome predictions. *Patterns* 3 (2022). +34. Weber, A., Born, J. & Rodriguez Martínez, M. TITAN: T-cell receptor specificity prediction with bimodal attention networks. Bioinformatics 37, i237-i244 (2021). +35. Lam, H. T., Sbodio, M. L., Galindo, M. M., Zayats, M., Fernandez-Diaz, R., Valls, V., Picco, G., Ramis, C. B. & Lopez, V. 
Otter-Knowledge: benchmarks of multimodal knowledge graph representation learning from different sources for drug discovery. arXiv preprint arXiv:2306.12802 (2023). +36. Kinnings, S. L., Liu, N., Tonge, P. J., Jackson, R. M., Xie, L. & Bourne, P. E. A machine learning-based method to improve docking scoring functions and its application to drug repurposing. Journal of chemical information and modeling 51, 408-419 (2011). +37. Kalemati, M., Zamani Emani, M. & Koohi, S. BiComp-DTA: Drug-target binding affinity prediction through complementary biological-related and compression-based featurization approach. PLOS Computational Biology 19, e1011036 (2023). +38. Wei, B. & Gong, X. DeepPLA: a novel deep learning-based model for protein-ligand binding affinity prediction (2021). +39. Probst, D., Schwaller, P. & Reymond, J.-L. Reaction classification and yield prediction using the differential reaction fingerprint DRFP. Digital discovery 1, 91-97 (2022). +40. Rivera, Z. A., Tayo, L., Chen, B.-Y. & Tsai, P.-W. In silico Evaluation of the Feasibility of Magnolia officinalis Electronshutting Compounds as Parkinson's Disease Remedy. Letters in Drug Design & Discovery 21, 3039-3048 (2024). +41. Pei, Q., Wu, L., Zhu, J., Xia, Y., Xie, S., Qin, T., Liu, H., Liu, T.-Y. & Yan, R. Breaking the barriers of data scarcity in drug-target affinity prediction. Briefings in Bioinformatics 24, bbad386 (2023). +42. Xia, F., Shukla, M., Brettin, T., Garcia-Cardona, C., Cohn, J., Allen, J. E., Maslov, S., Holbeck, S. L., Doroshow, J. H., Evrard, Y. A., et al. Predicting tumor cell line response to drug pairs with deep learning. BMC bioinformatics 19, 71-79 (2018). +43. Lind, A. P. & Anderson, P. C. Predicting drug activity against cancer cells by random forest models based on minimal genomic information and chemical properties. *PloS one* 14, e0219774 (2019). +44. Euclidia. https://github.com/euclidia/public-models. 2023. +45. Leenay, R. T., Aghazadeh, A., Hiatt, J., Tse, D., Roth, T. 
L., Apathy, R., Shifrut, E., Hultquist, J. F., Krogan, N., Wu, Z., et al. Large dataset enables prediction of repair after CRISPR-Cas9 editing in primary T cells. Nature biotechnology 37, 1034-1037 (2019). +46. Yang, K., Swanson, K., Jin, W., Coley, C., Eiden, P., Gao, H., Guzman-Perez, A., Hopper, T., Kelley, B., Mathea, M., et al. Analyzing learned molecular representations for property prediction. Journal of chemical information and modeling 59, 3370-3388 (2019). +47. Preuer, K., Lewis, R. P., Hochreiter, S., Bender, A., Bulusu, K. C. & Klambauer, G. DeepSynergy: predicting anti-cancer drug synergy with Deep Learning. Bioinformatics 34, 1538-1546 (2018). +48. Zheng, S., Rao, J., Zhang, Z., Xu, J. & Yang, Y. Predicting retrosynthetic reactions using self-corrected transformer neural networks. Journal of chemical information and modeling 60, 47-55 (2019). +49. Boral, N., Ghosh, P., Goswami, A. & Bhattacharyya, M. Accountable prediction of drug ADMET Properties with molecular descriptors. bioRxiv, 2022-06 (2022). +50. Hendrycks, D., Burns, C., Basart, S., Zou, A., Mazeika, M., Song, D. & Steinhardt, J. Measuring massive multitask language understanding. arXiv preprint arXiv:2009.03300 (2020). 
\ No newline at end of file diff --git a/data/2025/2504_06xxx/2504.06196/images/0114d9a00ab47d79177889b7773fe64b8b18a5fc2fdf408061a2db63ed40fe38.jpg b/data/2025/2504_06xxx/2504.06196/images/0114d9a00ab47d79177889b7773fe64b8b18a5fc2fdf408061a2db63ed40fe38.jpg new file mode 100644 index 0000000000000000000000000000000000000000..d7b35f69caee3f4690760f3e84f98274b7db0549 --- /dev/null +++ b/data/2025/2504_06xxx/2504.06196/images/0114d9a00ab47d79177889b7773fe64b8b18a5fc2fdf408061a2db63ed40fe38.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7ed0b51dd10207a2edb76d618551d0a7e6d9eb9ad86b7e293531bfecdd31907c +size 13042 diff --git a/data/2025/2504_06xxx/2504.06196/images/011981604316ec859899c67e2c3b9723288769a28b7e640db6e16cd5b20b778e.jpg b/data/2025/2504_06xxx/2504.06196/images/011981604316ec859899c67e2c3b9723288769a28b7e640db6e16cd5b20b778e.jpg new file mode 100644 index 0000000000000000000000000000000000000000..97a1fa05dbf693e335c438c56ba6c0d3cf576867 --- /dev/null +++ b/data/2025/2504_06xxx/2504.06196/images/011981604316ec859899c67e2c3b9723288769a28b7e640db6e16cd5b20b778e.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7a5fe3f50348a0de0f20437fbc6aa53d497fedd88a7d87ecc13990c78162a346 +size 210118 diff --git a/data/2025/2504_06xxx/2504.06196/images/0203c8f019b173fb8d62c4ee904f1f418162cc6b09d58945c721893ac5ac6ad4.jpg b/data/2025/2504_06xxx/2504.06196/images/0203c8f019b173fb8d62c4ee904f1f418162cc6b09d58945c721893ac5ac6ad4.jpg new file mode 100644 index 0000000000000000000000000000000000000000..f271cf4bc36ca0e4fc195a4595e5d09299e69a67 --- /dev/null +++ b/data/2025/2504_06xxx/2504.06196/images/0203c8f019b173fb8d62c4ee904f1f418162cc6b09d58945c721893ac5ac6ad4.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f15797ad3ccad29f8cf10a2a1ad75f27c2c7a19d19732e4ee119963daf287fe6 +size 91177 diff --git 
a/data/2025/2504_06xxx/2504.06196/images/09534e3b6d118249db6e3edff3bfb85233449f57161dcacc055ff510acfaca7c.jpg b/data/2025/2504_06xxx/2504.06196/images/09534e3b6d118249db6e3edff3bfb85233449f57161dcacc055ff510acfaca7c.jpg new file mode 100644 index 0000000000000000000000000000000000000000..65618091677c9d8c5681bf82ebb72c76d7c1042b --- /dev/null +++ b/data/2025/2504_06xxx/2504.06196/images/09534e3b6d118249db6e3edff3bfb85233449f57161dcacc055ff510acfaca7c.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5da8819897890df58b57322ffc1beab37c2bfe89c5258175479e5a23bb6c9a43 +size 16144 diff --git a/data/2025/2504_06xxx/2504.06196/images/096e92d1c27b2658abaac8299e572989ee024c86006f93ac8bf5b2c2c46548d4.jpg b/data/2025/2504_06xxx/2504.06196/images/096e92d1c27b2658abaac8299e572989ee024c86006f93ac8bf5b2c2c46548d4.jpg new file mode 100644 index 0000000000000000000000000000000000000000..e96f8673929f4420396f6650064fe3beda475f4e --- /dev/null +++ b/data/2025/2504_06xxx/2504.06196/images/096e92d1c27b2658abaac8299e572989ee024c86006f93ac8bf5b2c2c46548d4.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:abc6537fa159331d78487cc192a646ffde6f4a1467977da10abd20ad33e36745 +size 3626 diff --git a/data/2025/2504_06xxx/2504.06196/images/098d8717f5dbde8ee0685821cf521f28993021a44eb08614ac24af4103f4c735.jpg b/data/2025/2504_06xxx/2504.06196/images/098d8717f5dbde8ee0685821cf521f28993021a44eb08614ac24af4103f4c735.jpg new file mode 100644 index 0000000000000000000000000000000000000000..4515c2e36266170ed72ae8b88fb8279f0f692ece --- /dev/null +++ b/data/2025/2504_06xxx/2504.06196/images/098d8717f5dbde8ee0685821cf521f28993021a44eb08614ac24af4103f4c735.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:bec09a23785438c85537d5c91944c8a9dab681071701acb329cd33d3a3f9cea6 +size 296302 diff --git a/data/2025/2504_06xxx/2504.06196/images/0ab979193ce5715dcc7b650b9114e63d4a217e3ebcd07ca293810378aa73bd12.jpg 
b/data/2025/2504_06xxx/2504.06196/images/0ab979193ce5715dcc7b650b9114e63d4a217e3ebcd07ca293810378aa73bd12.jpg new file mode 100644 index 0000000000000000000000000000000000000000..49c36e431b5cb82ef6c270b72d8cef56b81fcd6b --- /dev/null +++ b/data/2025/2504_06xxx/2504.06196/images/0ab979193ce5715dcc7b650b9114e63d4a217e3ebcd07ca293810378aa73bd12.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:dc488aa4527eb7aa7470ac66229d8a16bdd061dfba03921b6b30e4974833a1ce +size 14889 diff --git a/data/2025/2504_06xxx/2504.06196/images/0bb72c425ec48ac5375fe73446d4fae9bb535296e49f9ec67b1508fd86755108.jpg b/data/2025/2504_06xxx/2504.06196/images/0bb72c425ec48ac5375fe73446d4fae9bb535296e49f9ec67b1508fd86755108.jpg new file mode 100644 index 0000000000000000000000000000000000000000..809ac5c44cf6ada2cd2a468304dbe5db755d2516 --- /dev/null +++ b/data/2025/2504_06xxx/2504.06196/images/0bb72c425ec48ac5375fe73446d4fae9bb535296e49f9ec67b1508fd86755108.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f0b772c116ecfa3e2ac243b819755bc4b14410e48e85084920c8fcfdc5e055fd +size 235738 diff --git a/data/2025/2504_06xxx/2504.06196/images/0dbc4689ed18c4fc12b7ae53b6420d0f6eb26f24706c74f033eb5e0918cf8339.jpg b/data/2025/2504_06xxx/2504.06196/images/0dbc4689ed18c4fc12b7ae53b6420d0f6eb26f24706c74f033eb5e0918cf8339.jpg new file mode 100644 index 0000000000000000000000000000000000000000..bb95ef74be3a196e204a799ac067e8f614b76e4a --- /dev/null +++ b/data/2025/2504_06xxx/2504.06196/images/0dbc4689ed18c4fc12b7ae53b6420d0f6eb26f24706c74f033eb5e0918cf8339.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c1e12ee11b6abedaf75eab1c2f0971028d221dfbcbc079d9892565f7c88c1fea +size 40700 diff --git a/data/2025/2504_06xxx/2504.06196/images/0fc6f2350e51b7e6cd5a7bbc1317a452740c03d27b918c2dcd047504fcfadd35.jpg b/data/2025/2504_06xxx/2504.06196/images/0fc6f2350e51b7e6cd5a7bbc1317a452740c03d27b918c2dcd047504fcfadd35.jpg new file mode 100644 index 
0000000000000000000000000000000000000000..bd3459b26f9229e10e96649fe7f4018e976dc4d5 --- /dev/null +++ b/data/2025/2504_06xxx/2504.06196/images/0fc6f2350e51b7e6cd5a7bbc1317a452740c03d27b918c2dcd047504fcfadd35.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6ec2d417974955db5a97e7cf39d603d230fb4f688f768df397c2130455ce26a8 +size 60825 diff --git a/data/2025/2504_06xxx/2504.06196/images/16baf39adc18c0e4e4aa59b45fb1b03adc1282067ff3e5c3d12ac88c14a15786.jpg b/data/2025/2504_06xxx/2504.06196/images/16baf39adc18c0e4e4aa59b45fb1b03adc1282067ff3e5c3d12ac88c14a15786.jpg new file mode 100644 index 0000000000000000000000000000000000000000..d293182d86b9f6406fd117d0422bd895b99d92ad --- /dev/null +++ b/data/2025/2504_06xxx/2504.06196/images/16baf39adc18c0e4e4aa59b45fb1b03adc1282067ff3e5c3d12ac88c14a15786.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a2272d9420947cfa1079b02b6b099c4c5094440b5507100d7e6ba8f4308483c3 +size 14595 diff --git a/data/2025/2504_06xxx/2504.06196/images/1996f43f70147e885c9ca69f311159a6643604936abffe7afc9aded5bd4946cc.jpg b/data/2025/2504_06xxx/2504.06196/images/1996f43f70147e885c9ca69f311159a6643604936abffe7afc9aded5bd4946cc.jpg new file mode 100644 index 0000000000000000000000000000000000000000..7ac361d4c3edde1b8a87e2b4efed982a03e3f974 --- /dev/null +++ b/data/2025/2504_06xxx/2504.06196/images/1996f43f70147e885c9ca69f311159a6643604936abffe7afc9aded5bd4946cc.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:de3f084027d33d065a252620e191ebfdc319eeefd8bf9f79836d6bdba01b98a2 +size 14515 diff --git a/data/2025/2504_06xxx/2504.06196/images/1b5ffa1d3820b6dfedd7efb2c60c35cd6a2033c4153cbd94273ca4ae0964a8d1.jpg b/data/2025/2504_06xxx/2504.06196/images/1b5ffa1d3820b6dfedd7efb2c60c35cd6a2033c4153cbd94273ca4ae0964a8d1.jpg new file mode 100644 index 0000000000000000000000000000000000000000..fecf4ec6e6e0ce7e844017fdc346f96b66357db9 --- /dev/null +++ 
b/data/2025/2504_06xxx/2504.06196/images/1b5ffa1d3820b6dfedd7efb2c60c35cd6a2033c4153cbd94273ca4ae0964a8d1.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:bcb357e31e7ab9669e344c0016cb58806af9a74225aa0a52f03ebbb98230aa34 +size 202811 diff --git a/data/2025/2504_06xxx/2504.06196/images/229596d5615d8db9e940ffb05b38f2d2da4d2620458f30f93387cd1de47e9957.jpg b/data/2025/2504_06xxx/2504.06196/images/229596d5615d8db9e940ffb05b38f2d2da4d2620458f30f93387cd1de47e9957.jpg new file mode 100644 index 0000000000000000000000000000000000000000..09d5179a4536b1a53b6df258decb69ecd4177cd1 --- /dev/null +++ b/data/2025/2504_06xxx/2504.06196/images/229596d5615d8db9e940ffb05b38f2d2da4d2620458f30f93387cd1de47e9957.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:448568c0f0030cd985d33563851db8adc98fd5beb07d0c89452479d42438ce70 +size 17293 diff --git a/data/2025/2504_06xxx/2504.06196/images/29542d68bc684f7ee0c7db74899e9f4990c401e026299fb8377fbbe2aa9ea653.jpg b/data/2025/2504_06xxx/2504.06196/images/29542d68bc684f7ee0c7db74899e9f4990c401e026299fb8377fbbe2aa9ea653.jpg new file mode 100644 index 0000000000000000000000000000000000000000..534edcf8fdb1a0edc9fbb8b551a7699a65b2b2a8 --- /dev/null +++ b/data/2025/2504_06xxx/2504.06196/images/29542d68bc684f7ee0c7db74899e9f4990c401e026299fb8377fbbe2aa9ea653.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:3287567b9f1774fc4e6fd3f761c339eeab788c793cb5580b829a9660b44be927 +size 25615 diff --git a/data/2025/2504_06xxx/2504.06196/images/2c3dedc41b7c9e61f949673cd15587fced5ab7c9ed1cd38b27f8cc2f630441e6.jpg b/data/2025/2504_06xxx/2504.06196/images/2c3dedc41b7c9e61f949673cd15587fced5ab7c9ed1cd38b27f8cc2f630441e6.jpg new file mode 100644 index 0000000000000000000000000000000000000000..3faf62e143e86e1e8ff2aa67fbb3de199111e71d --- /dev/null +++ b/data/2025/2504_06xxx/2504.06196/images/2c3dedc41b7c9e61f949673cd15587fced5ab7c9ed1cd38b27f8cc2f630441e6.jpg @@ -0,0 +1,3 @@ +version 
https://git-lfs.github.com/spec/v1 +oid sha256:605d7b412c8bd99d449a95764b8d5073a9caac8745a4772e09c7fc4737d92150 +size 14077 diff --git a/data/2025/2504_06xxx/2504.06196/images/2f9c4d08248fb222ff8b63a338289fdf3f2fba41aaaec627497361f9dcd3fb03.jpg b/data/2025/2504_06xxx/2504.06196/images/2f9c4d08248fb222ff8b63a338289fdf3f2fba41aaaec627497361f9dcd3fb03.jpg new file mode 100644 index 0000000000000000000000000000000000000000..365c7bb75dc561cfc359c9b3092f5a6c5b1efae4 --- /dev/null +++ b/data/2025/2504_06xxx/2504.06196/images/2f9c4d08248fb222ff8b63a338289fdf3f2fba41aaaec627497361f9dcd3fb03.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ec7e87460531b3204d0d12df358a80c244084c24cdde8442f17fd1163e4cdc42 +size 165269 diff --git a/data/2025/2504_06xxx/2504.06196/images/327ad9521c9b1211442d7f2d946b5d27f8bc523409af6030fb32861ad17716a2.jpg b/data/2025/2504_06xxx/2504.06196/images/327ad9521c9b1211442d7f2d946b5d27f8bc523409af6030fb32861ad17716a2.jpg new file mode 100644 index 0000000000000000000000000000000000000000..1fd231575bf25f9bebd5f974da6e747389e05ef6 --- /dev/null +++ b/data/2025/2504_06xxx/2504.06196/images/327ad9521c9b1211442d7f2d946b5d27f8bc523409af6030fb32861ad17716a2.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:31f20ec8ce7a1d69e9596db448ebddce26b5a4ed1c988bd41849856fe3222262 +size 94781 diff --git a/data/2025/2504_06xxx/2504.06196/images/3485a458e0e83c0575b63d1e81dbaa9c3d70d585063afc9a1cb9ef52ff9240c6.jpg b/data/2025/2504_06xxx/2504.06196/images/3485a458e0e83c0575b63d1e81dbaa9c3d70d585063afc9a1cb9ef52ff9240c6.jpg new file mode 100644 index 0000000000000000000000000000000000000000..00de07c175ae79b67534938ae80d4434144af29c --- /dev/null +++ b/data/2025/2504_06xxx/2504.06196/images/3485a458e0e83c0575b63d1e81dbaa9c3d70d585063afc9a1cb9ef52ff9240c6.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:29c92d4728dec850463b5c8370c2091086844f2fdf82d1f5b8f512ac10f14ed8 +size 22438 diff --git 
a/data/2025/2504_06xxx/2504.06196/images/3765c58d0679b30005a3a6896f3f4d67936c40efceb092bbd2821ab8663df4b7.jpg b/data/2025/2504_06xxx/2504.06196/images/3765c58d0679b30005a3a6896f3f4d67936c40efceb092bbd2821ab8663df4b7.jpg new file mode 100644 index 0000000000000000000000000000000000000000..0e73752fede1f0fe9c64b8cb47899f6f11dfa94b --- /dev/null +++ b/data/2025/2504_06xxx/2504.06196/images/3765c58d0679b30005a3a6896f3f4d67936c40efceb092bbd2821ab8663df4b7.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:104d679d1c2694a156bdd9769522f9fdeffc4aaddc5064d6b9c6a416b0b92ca8 +size 21176 diff --git a/data/2025/2504_06xxx/2504.06196/images/3cd0a375ee3d7de3a86068d0c489eab4f7df74f4ad4e1134db7d492a2c938c19.jpg b/data/2025/2504_06xxx/2504.06196/images/3cd0a375ee3d7de3a86068d0c489eab4f7df74f4ad4e1134db7d492a2c938c19.jpg new file mode 100644 index 0000000000000000000000000000000000000000..0523896b9aabaddf819d16985c60995ec33dd403 --- /dev/null +++ b/data/2025/2504_06xxx/2504.06196/images/3cd0a375ee3d7de3a86068d0c489eab4f7df74f4ad4e1134db7d492a2c938c19.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9ba2571d2495efefa74090659138511349e6c249439f6e88b3a655bdce714950 +size 14312 diff --git a/data/2025/2504_06xxx/2504.06196/images/4060b21bb67dbad81e197effeba434dd94828fd7e278fae975f4ae41d84efdb2.jpg b/data/2025/2504_06xxx/2504.06196/images/4060b21bb67dbad81e197effeba434dd94828fd7e278fae975f4ae41d84efdb2.jpg new file mode 100644 index 0000000000000000000000000000000000000000..5ad210112b320aa8fd9bd983111f03963897076c --- /dev/null +++ b/data/2025/2504_06xxx/2504.06196/images/4060b21bb67dbad81e197effeba434dd94828fd7e278fae975f4ae41d84efdb2.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5c050ace119f36d0fd3167e97c9bc3f32a7609c194de335ca099dc276d24812d +size 222809 diff --git a/data/2025/2504_06xxx/2504.06196/images/42c41199fcf8e1c1b61be220eadee67478dcac28dec9f92bb963ec83de23b969.jpg 
b/data/2025/2504_06xxx/2504.06196/images/42c41199fcf8e1c1b61be220eadee67478dcac28dec9f92bb963ec83de23b969.jpg new file mode 100644 index 0000000000000000000000000000000000000000..9430e27962b5a9d20e43e4c0a59f940fb9101ca4 --- /dev/null +++ b/data/2025/2504_06xxx/2504.06196/images/42c41199fcf8e1c1b61be220eadee67478dcac28dec9f92bb963ec83de23b969.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:74e17297f89264fa06e3f5111c63131f0946cb93240e16d84e6937c97d90491f +size 54770 diff --git a/data/2025/2504_06xxx/2504.06196/images/45bd5bb59ecbb59ebf3c3bd02e1a6e2b8cdf58c01e28987f97b63a2162234f73.jpg b/data/2025/2504_06xxx/2504.06196/images/45bd5bb59ecbb59ebf3c3bd02e1a6e2b8cdf58c01e28987f97b63a2162234f73.jpg new file mode 100644 index 0000000000000000000000000000000000000000..731c903759c9d36169b83e3fa8957fb273d2b5e7 --- /dev/null +++ b/data/2025/2504_06xxx/2504.06196/images/45bd5bb59ecbb59ebf3c3bd02e1a6e2b8cdf58c01e28987f97b63a2162234f73.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:3e2760b1c90fa0b42c37a8fd6ccd48d77c1b3660e013a6aa30c45d6749378a01 +size 17868 diff --git a/data/2025/2504_06xxx/2504.06196/images/4d2824d594ed2d7abd228cce1d0df9ff221c1c6c2479fa2ed84c0df88c6e7cac.jpg b/data/2025/2504_06xxx/2504.06196/images/4d2824d594ed2d7abd228cce1d0df9ff221c1c6c2479fa2ed84c0df88c6e7cac.jpg new file mode 100644 index 0000000000000000000000000000000000000000..4c58bec1438f75cfd9fe659fff3a420efc307e67 --- /dev/null +++ b/data/2025/2504_06xxx/2504.06196/images/4d2824d594ed2d7abd228cce1d0df9ff221c1c6c2479fa2ed84c0df88c6e7cac.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:42c7c92fce0934a6c106188cd8ce347df53bd6e4cebd6a3da7909b37b13728bf +size 115768 diff --git a/data/2025/2504_06xxx/2504.06196/images/4dfa44b4877c0dff07f7e32085f8af1e800f4ac8bc4d1d5740a2f5789b0e102d.jpg b/data/2025/2504_06xxx/2504.06196/images/4dfa44b4877c0dff07f7e32085f8af1e800f4ac8bc4d1d5740a2f5789b0e102d.jpg new file mode 100644 index 
0000000000000000000000000000000000000000..6c75360cf2b33c4ae30904223f4c98e89b49c4c9 --- /dev/null +++ b/data/2025/2504_06xxx/2504.06196/images/4dfa44b4877c0dff07f7e32085f8af1e800f4ac8bc4d1d5740a2f5789b0e102d.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ca571e2d4a6c11d6eee43e84b45370777fb02943e2946fce8f070bb2c5955b7f +size 16038 diff --git a/data/2025/2504_06xxx/2504.06196/images/5019a41ee3d7b9b09cfa6b183c6cca835c79d3fc3f8b7e470d8b3d39c395d574.jpg b/data/2025/2504_06xxx/2504.06196/images/5019a41ee3d7b9b09cfa6b183c6cca835c79d3fc3f8b7e470d8b3d39c395d574.jpg new file mode 100644 index 0000000000000000000000000000000000000000..bfed83abcb56bb51a82876110ccb80806955e23a --- /dev/null +++ b/data/2025/2504_06xxx/2504.06196/images/5019a41ee3d7b9b09cfa6b183c6cca835c79d3fc3f8b7e470d8b3d39c395d574.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:dfd710f849b1910890bfc4c12a42584eee599e40e8c5266fb405c6f826e990f8 +size 14246 diff --git a/data/2025/2504_06xxx/2504.06196/images/50aa41bd72408b2d1964da82f8d4a59c63c3870b25b0effe6eb0301bce2fba97.jpg b/data/2025/2504_06xxx/2504.06196/images/50aa41bd72408b2d1964da82f8d4a59c63c3870b25b0effe6eb0301bce2fba97.jpg new file mode 100644 index 0000000000000000000000000000000000000000..086f693d5b89d137e3ad944acc9a5fc4def3b383 --- /dev/null +++ b/data/2025/2504_06xxx/2504.06196/images/50aa41bd72408b2d1964da82f8d4a59c63c3870b25b0effe6eb0301bce2fba97.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e478f3ec2aba79322029ff1687cf573071a6a136327eb3bae5d86ffc4ba052ef +size 22429 diff --git a/data/2025/2504_06xxx/2504.06196/images/53ef111b2c7457fddcdcb839e97bb4a0565136bb40ee6962ec4d103f9efc3a79.jpg b/data/2025/2504_06xxx/2504.06196/images/53ef111b2c7457fddcdcb839e97bb4a0565136bb40ee6962ec4d103f9efc3a79.jpg new file mode 100644 index 0000000000000000000000000000000000000000..810e55d684b207e2507e98176e33778465a6d6b3 --- /dev/null +++ 
b/data/2025/2504_06xxx/2504.06196/images/53ef111b2c7457fddcdcb839e97bb4a0565136bb40ee6962ec4d103f9efc3a79.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:90b9544c407d7e98cb5c3113238ed63774d6ea5a16b603c3e62de4d0e524d625 +size 25002 diff --git a/data/2025/2504_06xxx/2504.06196/images/5440136090b3e3dd0547d3116cd363c58ded13e0ceb7ef554d37f45cd179852c.jpg b/data/2025/2504_06xxx/2504.06196/images/5440136090b3e3dd0547d3116cd363c58ded13e0ceb7ef554d37f45cd179852c.jpg new file mode 100644 index 0000000000000000000000000000000000000000..38a3290d1c1eea52edccf12a9107f58c1a18b18f --- /dev/null +++ b/data/2025/2504_06xxx/2504.06196/images/5440136090b3e3dd0547d3116cd363c58ded13e0ceb7ef554d37f45cd179852c.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e809a084cb8da6ed9cb72f66f634fec8939aee8b1c91b6cc6cec23c1689e4a3e +size 19762 diff --git a/data/2025/2504_06xxx/2504.06196/images/595c78f89cc375092a21ef0c686aac0157bd0fd7a3be1241a02d3da171359946.jpg b/data/2025/2504_06xxx/2504.06196/images/595c78f89cc375092a21ef0c686aac0157bd0fd7a3be1241a02d3da171359946.jpg new file mode 100644 index 0000000000000000000000000000000000000000..e5f63c1fddd597bd333f6b62df121ed549ab38b4 --- /dev/null +++ b/data/2025/2504_06xxx/2504.06196/images/595c78f89cc375092a21ef0c686aac0157bd0fd7a3be1241a02d3da171359946.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c25bc7346e3bd666ab2ab531775a26dbc8be8d2bfe1d8e87709c5544ada708b2 +size 22249 diff --git a/data/2025/2504_06xxx/2504.06196/images/5a77174bc677da9772e91115ac640fda919da96c5104c827eee2013682a15b8a.jpg b/data/2025/2504_06xxx/2504.06196/images/5a77174bc677da9772e91115ac640fda919da96c5104c827eee2013682a15b8a.jpg new file mode 100644 index 0000000000000000000000000000000000000000..d989a7686ee44493b440165f48bc99e4d4a15b34 --- /dev/null +++ b/data/2025/2504_06xxx/2504.06196/images/5a77174bc677da9772e91115ac640fda919da96c5104c827eee2013682a15b8a.jpg @@ -0,0 +1,3 @@ +version 
https://git-lfs.github.com/spec/v1 +oid sha256:3638119e74bf2877de74522f05533d28e96aeb5788ae0144d6055ddb91e253d5 +size 22483 diff --git a/data/2025/2504_06xxx/2504.06196/images/6c106798dfbe627a04549474626d78648a113dce271a2871c66142c409a94aba.jpg b/data/2025/2504_06xxx/2504.06196/images/6c106798dfbe627a04549474626d78648a113dce271a2871c66142c409a94aba.jpg new file mode 100644 index 0000000000000000000000000000000000000000..f888c4e3c6b250e95bc96056a1fa52093cc889a9 --- /dev/null +++ b/data/2025/2504_06xxx/2504.06196/images/6c106798dfbe627a04549474626d78648a113dce271a2871c66142c409a94aba.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2db48ce89a21cf1e79fb7a8de8a7de765b32879c24b9c1b3f171f86f45bd2104 +size 326210 diff --git a/data/2025/2504_06xxx/2504.06196/images/6c5ae632ad262260986f1fe3efcfbe3a474c92834958a1bd7c97690782bbf10d.jpg b/data/2025/2504_06xxx/2504.06196/images/6c5ae632ad262260986f1fe3efcfbe3a474c92834958a1bd7c97690782bbf10d.jpg new file mode 100644 index 0000000000000000000000000000000000000000..c138246d5a40192f7dbb10a28e9fcbf977a2a4de --- /dev/null +++ b/data/2025/2504_06xxx/2504.06196/images/6c5ae632ad262260986f1fe3efcfbe3a474c92834958a1bd7c97690782bbf10d.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f3706182f0d13940b90d120c8e47c76ee1d81b495d0159f8a3106307a3555d05 +size 24617 diff --git a/data/2025/2504_06xxx/2504.06196/images/6c7190df8fb3e455a4678103a2125ca56783f3046dad39ad10081da4aeb70ed1.jpg b/data/2025/2504_06xxx/2504.06196/images/6c7190df8fb3e455a4678103a2125ca56783f3046dad39ad10081da4aeb70ed1.jpg new file mode 100644 index 0000000000000000000000000000000000000000..efac4c16de62ec01186cb7ecc8d704cd37e8a4d5 --- /dev/null +++ b/data/2025/2504_06xxx/2504.06196/images/6c7190df8fb3e455a4678103a2125ca56783f3046dad39ad10081da4aeb70ed1.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:38b11bf9f0532ac1dc8b80f53a68c34e7e987bb9409d67f054251ebd958ac31e +size 18482 diff --git 
a/data/2025/2504_06xxx/2504.06196/images/6eb51e87119992d386790213cb24f8ca1436c165f652aa4c124e908de08b04eb.jpg b/data/2025/2504_06xxx/2504.06196/images/6eb51e87119992d386790213cb24f8ca1436c165f652aa4c124e908de08b04eb.jpg new file mode 100644 index 0000000000000000000000000000000000000000..a05a941f84ef0e58d9cc8ce63a0cf06f5c0073f8 --- /dev/null +++ b/data/2025/2504_06xxx/2504.06196/images/6eb51e87119992d386790213cb24f8ca1436c165f652aa4c124e908de08b04eb.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:68244e6c1ee5cb8a1171b57ee11c4d14e782d80018d8ccac2aee1e0722e4fd6f +size 15661 diff --git a/data/2025/2504_06xxx/2504.06196/images/778634aa5c8b1460d1e1005fbb3e9371ca11ba143cab37feb55f39531fd8284d.jpg b/data/2025/2504_06xxx/2504.06196/images/778634aa5c8b1460d1e1005fbb3e9371ca11ba143cab37feb55f39531fd8284d.jpg new file mode 100644 index 0000000000000000000000000000000000000000..a50e51646e95fa890b697c9c81369c0bc391c0f5 --- /dev/null +++ b/data/2025/2504_06xxx/2504.06196/images/778634aa5c8b1460d1e1005fbb3e9371ca11ba143cab37feb55f39531fd8284d.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:bd8b24419ae6e13786e02a678ce54ca97cb7f7c86563b458e78983331ac9d5e6 +size 21453 diff --git a/data/2025/2504_06xxx/2504.06196/images/83c7e9c7794221b2cb751ba8e52d2d2e019f0563012fa1743854d7b8454bd866.jpg b/data/2025/2504_06xxx/2504.06196/images/83c7e9c7794221b2cb751ba8e52d2d2e019f0563012fa1743854d7b8454bd866.jpg new file mode 100644 index 0000000000000000000000000000000000000000..896f9854538b33b81be61281ea8958a6f509d192 --- /dev/null +++ b/data/2025/2504_06xxx/2504.06196/images/83c7e9c7794221b2cb751ba8e52d2d2e019f0563012fa1743854d7b8454bd866.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:eed4d6ca3531a5396969edc65e71725983dcb0cf34239cd1206805485f59dc12 +size 3767 diff --git a/data/2025/2504_06xxx/2504.06196/images/868848d097319f43df2f2324861cfe2384f376dcae1a95d6563e3f7b212d5ba8.jpg 
b/data/2025/2504_06xxx/2504.06196/images/868848d097319f43df2f2324861cfe2384f376dcae1a95d6563e3f7b212d5ba8.jpg new file mode 100644 index 0000000000000000000000000000000000000000..cb97f61b1cb2678bfe7cdf96b65dbc849dd50f52 --- /dev/null +++ b/data/2025/2504_06xxx/2504.06196/images/868848d097319f43df2f2324861cfe2384f376dcae1a95d6563e3f7b212d5ba8.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a6c9fec49da98d8c890907cb6c81ca53196921a02630bbca538fac3133747962 +size 15680 diff --git a/data/2025/2504_06xxx/2504.06196/images/8831205666fac2be3162fb8e88173d029338f92c0c4193c627a45407203b69d9.jpg b/data/2025/2504_06xxx/2504.06196/images/8831205666fac2be3162fb8e88173d029338f92c0c4193c627a45407203b69d9.jpg new file mode 100644 index 0000000000000000000000000000000000000000..0082e96b291dcfe078052c7af7e9cedc55aa417f --- /dev/null +++ b/data/2025/2504_06xxx/2504.06196/images/8831205666fac2be3162fb8e88173d029338f92c0c4193c627a45407203b69d9.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4732a6ba575a61c9676e8320d9c5816ec0358a1de3d5d74680570049366f51d0 +size 23417 diff --git a/data/2025/2504_06xxx/2504.06196/images/8b46427efa8d29aead6de741c144673fa93c545121e1a5c5fd8f01be42910034.jpg b/data/2025/2504_06xxx/2504.06196/images/8b46427efa8d29aead6de741c144673fa93c545121e1a5c5fd8f01be42910034.jpg new file mode 100644 index 0000000000000000000000000000000000000000..c910aa29b0bfa0c92199a04c9314425ad2abdec0 --- /dev/null +++ b/data/2025/2504_06xxx/2504.06196/images/8b46427efa8d29aead6de741c144673fa93c545121e1a5c5fd8f01be42910034.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b4219b3cefa046e7076e47fff91c83d55d07c9e28560a67006a4079a774e9d67 +size 15125 diff --git a/data/2025/2504_06xxx/2504.06196/images/97948ef7ade23a9c58cba5b5f186c03c35fd709d4e7765967ffad5cb11de2ff1.jpg b/data/2025/2504_06xxx/2504.06196/images/97948ef7ade23a9c58cba5b5f186c03c35fd709d4e7765967ffad5cb11de2ff1.jpg new file mode 100644 index 
0000000000000000000000000000000000000000..c9d357296054d41c3af83cd850f3e9631b68c61a --- /dev/null +++ b/data/2025/2504_06xxx/2504.06196/images/97948ef7ade23a9c58cba5b5f186c03c35fd709d4e7765967ffad5cb11de2ff1.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8c39ac34cd30de989ef8fa91befd85da96d7ef8987d8947b2e068aa03083058c +size 176969 diff --git a/data/2025/2504_06xxx/2504.06196/images/97a2ed49d9a6a34eb3566d17f60c2015ee4e18897569ccfbdda77a9076bdca00.jpg b/data/2025/2504_06xxx/2504.06196/images/97a2ed49d9a6a34eb3566d17f60c2015ee4e18897569ccfbdda77a9076bdca00.jpg new file mode 100644 index 0000000000000000000000000000000000000000..0a9992403488b85768f7c082881d2c84c86b05f4 --- /dev/null +++ b/data/2025/2504_06xxx/2504.06196/images/97a2ed49d9a6a34eb3566d17f60c2015ee4e18897569ccfbdda77a9076bdca00.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:51dd8f43484be05dccbeb6b941cf38feff4cc3c318b761984948ceeb927411bf +size 24544 diff --git a/data/2025/2504_06xxx/2504.06196/images/98955b38edb8949ab00f726cf43f2ccf404666fb5bdd25f71cd16900b64edab5.jpg b/data/2025/2504_06xxx/2504.06196/images/98955b38edb8949ab00f726cf43f2ccf404666fb5bdd25f71cd16900b64edab5.jpg new file mode 100644 index 0000000000000000000000000000000000000000..894cd13f6f801d8aa54da0a6e6621c0087ccca65 --- /dev/null +++ b/data/2025/2504_06xxx/2504.06196/images/98955b38edb8949ab00f726cf43f2ccf404666fb5bdd25f71cd16900b64edab5.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8eed3c0b0a803254d04c456622f580ae1f623596451341578fc8399e823e068c +size 19177 diff --git a/data/2025/2504_06xxx/2504.06196/images/9b152fe3c52d378f2d027185f60d5bd699d4d295bd539075317ae94dfc9ac7a1.jpg b/data/2025/2504_06xxx/2504.06196/images/9b152fe3c52d378f2d027185f60d5bd699d4d295bd539075317ae94dfc9ac7a1.jpg new file mode 100644 index 0000000000000000000000000000000000000000..888c8f9c336e8b91dcddb9381f87b91041b80018 --- /dev/null +++ 
b/data/2025/2504_06xxx/2504.06196/images/9b152fe3c52d378f2d027185f60d5bd699d4d295bd539075317ae94dfc9ac7a1.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e206d34c32fca7436ae35b48a4bbda5359d410ade0a098f3fad5aa6c8e082a84 +size 14181 diff --git a/data/2025/2504_06xxx/2504.06196/images/9c83b0f019f49296d6ba47ff826893c3814c3507cfe6ad3bd8589e6ca7128176.jpg b/data/2025/2504_06xxx/2504.06196/images/9c83b0f019f49296d6ba47ff826893c3814c3507cfe6ad3bd8589e6ca7128176.jpg new file mode 100644 index 0000000000000000000000000000000000000000..7c4aba2c52c06f9643e3c17697853140c24071ea --- /dev/null +++ b/data/2025/2504_06xxx/2504.06196/images/9c83b0f019f49296d6ba47ff826893c3814c3507cfe6ad3bd8589e6ca7128176.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a381fb4012083641af64143c06263276345c7a073ad817a2ec901b14b2d1061d +size 257318 diff --git a/data/2025/2504_06xxx/2504.06196/images/9d4476e7572c6c23be88f9ba85d88ce520cdc474eb566aa40abb2c1e2e6cb7ea.jpg b/data/2025/2504_06xxx/2504.06196/images/9d4476e7572c6c23be88f9ba85d88ce520cdc474eb566aa40abb2c1e2e6cb7ea.jpg new file mode 100644 index 0000000000000000000000000000000000000000..33890bf57d624949b13402e0d36173b0d0eb50e3 --- /dev/null +++ b/data/2025/2504_06xxx/2504.06196/images/9d4476e7572c6c23be88f9ba85d88ce520cdc474eb566aa40abb2c1e2e6cb7ea.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f3e245bd3ad38b640e25f09d10918b9343730cf52e19217e39b0899cb992edc5 +size 13441 diff --git a/data/2025/2504_06xxx/2504.06196/images/9fb69e97ac4ee81f26e53d78fe24373e98bbbb61c0bd4b22e0baf2713a888d55.jpg b/data/2025/2504_06xxx/2504.06196/images/9fb69e97ac4ee81f26e53d78fe24373e98bbbb61c0bd4b22e0baf2713a888d55.jpg new file mode 100644 index 0000000000000000000000000000000000000000..c87e432cb0e11382b19f2b27f5638227ea24a078 --- /dev/null +++ b/data/2025/2504_06xxx/2504.06196/images/9fb69e97ac4ee81f26e53d78fe24373e98bbbb61c0bd4b22e0baf2713a888d55.jpg @@ -0,0 +1,3 @@ +version 
https://git-lfs.github.com/spec/v1 +oid sha256:5305d57649f044850a126c025829b799d61cde4c004ef0e59ef7f7c17c2055e8 +size 334011 diff --git a/data/2025/2504_06xxx/2504.06196/images/a06c6ddce36e0a8cc6fde75d0a6734d0239ad730b94f9224094944eb29afbd53.jpg b/data/2025/2504_06xxx/2504.06196/images/a06c6ddce36e0a8cc6fde75d0a6734d0239ad730b94f9224094944eb29afbd53.jpg new file mode 100644 index 0000000000000000000000000000000000000000..f99620fa5ef520ef6eb937f3026bc1eb62ef7aa9 --- /dev/null +++ b/data/2025/2504_06xxx/2504.06196/images/a06c6ddce36e0a8cc6fde75d0a6734d0239ad730b94f9224094944eb29afbd53.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:aa82dfaa8573b99ad0c3749e7b7593ac67f1cd86e9e20a904fd412526a6960a8 +size 38219 diff --git a/data/2025/2504_06xxx/2504.06196/images/a3c869088761c5ea30145738fb0e7de8fb845000d423b65d65ef9ab5098f21ca.jpg b/data/2025/2504_06xxx/2504.06196/images/a3c869088761c5ea30145738fb0e7de8fb845000d423b65d65ef9ab5098f21ca.jpg new file mode 100644 index 0000000000000000000000000000000000000000..2cb8f54d304d6821f1f07c702bc994f2bcb87a55 --- /dev/null +++ b/data/2025/2504_06xxx/2504.06196/images/a3c869088761c5ea30145738fb0e7de8fb845000d423b65d65ef9ab5098f21ca.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:73b6144fdefd6f5dccb630da9a56cc9f8a0904f53c53281c6ec334466ef20962 +size 34328 diff --git a/data/2025/2504_06xxx/2504.06196/images/a97b57e1e6b69f48df9b46566fd0c5232a634bcaa152ee1d7db532c7273836dd.jpg b/data/2025/2504_06xxx/2504.06196/images/a97b57e1e6b69f48df9b46566fd0c5232a634bcaa152ee1d7db532c7273836dd.jpg new file mode 100644 index 0000000000000000000000000000000000000000..cf177f702d3baf428e102926294451f4c57805e8 --- /dev/null +++ b/data/2025/2504_06xxx/2504.06196/images/a97b57e1e6b69f48df9b46566fd0c5232a634bcaa152ee1d7db532c7273836dd.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:31acc5f01146ca0098bdba6a752cbc9ec4dab4073894a10fcfd4ced5d9c24f84 +size 27104 diff --git 
a/data/2025/2504_06xxx/2504.06196/images/a9c5a0cec4eba54bfa2c62a6203b620f76c3bf7a0500cf31babec23662e84c49.jpg b/data/2025/2504_06xxx/2504.06196/images/a9c5a0cec4eba54bfa2c62a6203b620f76c3bf7a0500cf31babec23662e84c49.jpg new file mode 100644 index 0000000000000000000000000000000000000000..60777280fedf38678e46616dd86cbd6bffb52268 --- /dev/null +++ b/data/2025/2504_06xxx/2504.06196/images/a9c5a0cec4eba54bfa2c62a6203b620f76c3bf7a0500cf31babec23662e84c49.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:625fc9da6d7bbff8880530762ff8c46069256a85280b0bf1a7032eccdb68e010 +size 19148 diff --git a/data/2025/2504_06xxx/2504.06196/images/abcd5f2594648ea69cc611d92f1720d998830411dd4e40aca9dce191977179fb.jpg b/data/2025/2504_06xxx/2504.06196/images/abcd5f2594648ea69cc611d92f1720d998830411dd4e40aca9dce191977179fb.jpg new file mode 100644 index 0000000000000000000000000000000000000000..ba84d5309e4f1d95c9e75ca23fbbeae5e30f17d7 --- /dev/null +++ b/data/2025/2504_06xxx/2504.06196/images/abcd5f2594648ea69cc611d92f1720d998830411dd4e40aca9dce191977179fb.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:0ef399eda4862f69e8cf7996a7644f8bb8e27fd2393736cdb99bed8b52d0617c +size 19260 diff --git a/data/2025/2504_06xxx/2504.06196/images/b32c8ecb923b6ef6b0bb7d90b88ccffdb27bb8885edf80d5efadf8fd7a85e95f.jpg b/data/2025/2504_06xxx/2504.06196/images/b32c8ecb923b6ef6b0bb7d90b88ccffdb27bb8885edf80d5efadf8fd7a85e95f.jpg new file mode 100644 index 0000000000000000000000000000000000000000..4c08e04d31242a4faba12f34d884372098c6efe5 --- /dev/null +++ b/data/2025/2504_06xxx/2504.06196/images/b32c8ecb923b6ef6b0bb7d90b88ccffdb27bb8885edf80d5efadf8fd7a85e95f.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:3788063fa187c2fd0f48235aa4bbc9e7363c1be10d4a4557fae83bcabbaeb058 +size 274530 diff --git a/data/2025/2504_06xxx/2504.06196/images/b821a33a107593bfb04127fe8375bcedb99541d1716ad89c30f99bc5c74a7f45.jpg 
b/data/2025/2504_06xxx/2504.06196/images/b821a33a107593bfb04127fe8375bcedb99541d1716ad89c30f99bc5c74a7f45.jpg new file mode 100644 index 0000000000000000000000000000000000000000..004f24ac0003c3223aaef6c1356596fe1acd2c70 --- /dev/null +++ b/data/2025/2504_06xxx/2504.06196/images/b821a33a107593bfb04127fe8375bcedb99541d1716ad89c30f99bc5c74a7f45.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:bdd4dd875c28974e96117fbf522be5e7cfe12b8eb76535eb86365540d2f682fe +size 42962 diff --git a/data/2025/2504_06xxx/2504.06196/images/b9d2298fd6339b4d44d87216064cb18663a965c2d62e1de82868a8b0f2abce73.jpg b/data/2025/2504_06xxx/2504.06196/images/b9d2298fd6339b4d44d87216064cb18663a965c2d62e1de82868a8b0f2abce73.jpg new file mode 100644 index 0000000000000000000000000000000000000000..22606c251b9e10246d4f62ca81d2fc650cd70c27 --- /dev/null +++ b/data/2025/2504_06xxx/2504.06196/images/b9d2298fd6339b4d44d87216064cb18663a965c2d62e1de82868a8b0f2abce73.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:43b8c3d3663e72d0195af788d7daf3f490528fb68ca97ce4d8655838fd550e93 +size 396996 diff --git a/data/2025/2504_06xxx/2504.06196/images/bb7daa0ab3745fb49912d1219615736ceb002d7d69fafd8c7a73819734f71a09.jpg b/data/2025/2504_06xxx/2504.06196/images/bb7daa0ab3745fb49912d1219615736ceb002d7d69fafd8c7a73819734f71a09.jpg new file mode 100644 index 0000000000000000000000000000000000000000..5c707a1f6b55f3f47579336f9167472a8ce33698 --- /dev/null +++ b/data/2025/2504_06xxx/2504.06196/images/bb7daa0ab3745fb49912d1219615736ceb002d7d69fafd8c7a73819734f71a09.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4e27dbc0455a5ff88ca3c6ccc0fe0a6f94cfff1bcbb67c15c336d987b25d7ead +size 24565 diff --git a/data/2025/2504_06xxx/2504.06196/images/c48b857842aeac69278aa4332949b757fe4bc93f85a7901073f2eedfb083c3c4.jpg b/data/2025/2504_06xxx/2504.06196/images/c48b857842aeac69278aa4332949b757fe4bc93f85a7901073f2eedfb083c3c4.jpg new file mode 100644 index 
0000000000000000000000000000000000000000..b7780907ff547ef96f0efefb3c4a26ccca9dbe49 --- /dev/null +++ b/data/2025/2504_06xxx/2504.06196/images/c48b857842aeac69278aa4332949b757fe4bc93f85a7901073f2eedfb083c3c4.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b72533cd702628e1a00f0d9fdd1ea98d5d13b9fc89de45d1ba460f6ca10069bf +size 17198 diff --git a/data/2025/2504_06xxx/2504.06196/images/cff609f1cff46876c76c043fcbdff25eab187a12d98a2bd260d1ab40fc7d3e3e.jpg b/data/2025/2504_06xxx/2504.06196/images/cff609f1cff46876c76c043fcbdff25eab187a12d98a2bd260d1ab40fc7d3e3e.jpg new file mode 100644 index 0000000000000000000000000000000000000000..ae51723b0c7012a5e9b611190e0537de2ba71f42 --- /dev/null +++ b/data/2025/2504_06xxx/2504.06196/images/cff609f1cff46876c76c043fcbdff25eab187a12d98a2bd260d1ab40fc7d3e3e.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:803c740604f56b9786769b53bb03c411a336a0a389a73469d8bad5b6a8ce70c2 +size 25902 diff --git a/data/2025/2504_06xxx/2504.06196/images/d2323cf4b228f6cb908f1814d922975c4028f6cd3bba08c079fa90cf5aa14728.jpg b/data/2025/2504_06xxx/2504.06196/images/d2323cf4b228f6cb908f1814d922975c4028f6cd3bba08c079fa90cf5aa14728.jpg new file mode 100644 index 0000000000000000000000000000000000000000..c4197f9d61691e44fe50ff4419e220610608c918 --- /dev/null +++ b/data/2025/2504_06xxx/2504.06196/images/d2323cf4b228f6cb908f1814d922975c4028f6cd3bba08c079fa90cf5aa14728.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ff1dcbc57312ffa8406eb1c4f3c00497972f8683be61f7ecf9870ba62c1aaed2 +size 71593 diff --git a/data/2025/2504_06xxx/2504.06196/images/d64008d7bba648430f646cee132bb06a7242ca81f3335858adf4a949d8bac876.jpg b/data/2025/2504_06xxx/2504.06196/images/d64008d7bba648430f646cee132bb06a7242ca81f3335858adf4a949d8bac876.jpg new file mode 100644 index 0000000000000000000000000000000000000000..47edbb16e17bfc5ff689f645e5c9351ee5a5baae --- /dev/null +++ 
b/data/2025/2504_06xxx/2504.06196/images/d64008d7bba648430f646cee132bb06a7242ca81f3335858adf4a949d8bac876.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2032ce79d158b3e81ddf19162976873855e6f3cc94b8687495c13a2638525e43 +size 10630 diff --git a/data/2025/2504_06xxx/2504.06196/images/d6c50205b6b2c913f7c73865098c30a77a468c56c5c6ec1feffbffeeab77100e.jpg b/data/2025/2504_06xxx/2504.06196/images/d6c50205b6b2c913f7c73865098c30a77a468c56c5c6ec1feffbffeeab77100e.jpg new file mode 100644 index 0000000000000000000000000000000000000000..876eedd9ed954be596f25ab8d0d761d9e34030fb --- /dev/null +++ b/data/2025/2504_06xxx/2504.06196/images/d6c50205b6b2c913f7c73865098c30a77a468c56c5c6ec1feffbffeeab77100e.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:fb68ae059364bbd91f165cce2fdc1dde095fed8378af2049bd33d816e103a19b +size 51230 diff --git a/data/2025/2504_06xxx/2504.06196/images/d7dcc741662b146f89dbd3da83f6492c95c1c43ce8fb40ae16b5fbee3c7eaae9.jpg b/data/2025/2504_06xxx/2504.06196/images/d7dcc741662b146f89dbd3da83f6492c95c1c43ce8fb40ae16b5fbee3c7eaae9.jpg new file mode 100644 index 0000000000000000000000000000000000000000..ca25d0e71bc673d8d14c6bf7456c4c6f426517d6 --- /dev/null +++ b/data/2025/2504_06xxx/2504.06196/images/d7dcc741662b146f89dbd3da83f6492c95c1c43ce8fb40ae16b5fbee3c7eaae9.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:0f63007890b26d482bc0d6d8c893b6d91735002ff60dcbf60151258891114dc6 +size 69312 diff --git a/data/2025/2504_06xxx/2504.06196/images/e02f5ed44e79ec81692d41c9c6fe4b312e200e4239d7e952545b07549038790e.jpg b/data/2025/2504_06xxx/2504.06196/images/e02f5ed44e79ec81692d41c9c6fe4b312e200e4239d7e952545b07549038790e.jpg new file mode 100644 index 0000000000000000000000000000000000000000..703af6a1c63ff024da9cf422012f9debca67664e --- /dev/null +++ b/data/2025/2504_06xxx/2504.06196/images/e02f5ed44e79ec81692d41c9c6fe4b312e200e4239d7e952545b07549038790e.jpg @@ -0,0 +1,3 @@ +version 
https://git-lfs.github.com/spec/v1 +oid sha256:666a4353160369e546ea8c60d8f35794cce1e030caa5f1308fb56c38c3a921d9 +size 139960 diff --git a/data/2025/2504_06xxx/2504.06196/images/e6c13c82f9aad77ffe02614d6eac77afddf8ff42b5e2ab8a220b12c5e64a976b.jpg b/data/2025/2504_06xxx/2504.06196/images/e6c13c82f9aad77ffe02614d6eac77afddf8ff42b5e2ab8a220b12c5e64a976b.jpg new file mode 100644 index 0000000000000000000000000000000000000000..9cff5d6702cee0c133c8969f5d29dd916ed315f9 --- /dev/null +++ b/data/2025/2504_06xxx/2504.06196/images/e6c13c82f9aad77ffe02614d6eac77afddf8ff42b5e2ab8a220b12c5e64a976b.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:71cfd07b0d19138857feae61a54a621b2a9d397f877eb75a97e0da7996e1a0a1 +size 16388 diff --git a/data/2025/2504_06xxx/2504.06196/images/ebfa122c4e8e4aaa8d9e1067f57d4762781618f9e07826cee01379d3677e3b2b.jpg b/data/2025/2504_06xxx/2504.06196/images/ebfa122c4e8e4aaa8d9e1067f57d4762781618f9e07826cee01379d3677e3b2b.jpg new file mode 100644 index 0000000000000000000000000000000000000000..317c00826873b13a1e1780870c620b58099626dc --- /dev/null +++ b/data/2025/2504_06xxx/2504.06196/images/ebfa122c4e8e4aaa8d9e1067f57d4762781618f9e07826cee01379d3677e3b2b.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9e32f1d062e1939fde77ec935e85fe172b15d4723c7b71f392f61a9d755e1570 +size 16961 diff --git a/data/2025/2504_06xxx/2504.06196/images/edd756cd1d85fd738f6f39022b8b79d554ff5a528cce9f961383a0bd3ccf1307.jpg b/data/2025/2504_06xxx/2504.06196/images/edd756cd1d85fd738f6f39022b8b79d554ff5a528cce9f961383a0bd3ccf1307.jpg new file mode 100644 index 0000000000000000000000000000000000000000..76055ff7b654cd1c3ba1b36239dc5d2b8f23fc24 --- /dev/null +++ b/data/2025/2504_06xxx/2504.06196/images/edd756cd1d85fd738f6f39022b8b79d554ff5a528cce9f961383a0bd3ccf1307.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b3f01b3866fc1a8be517a5e9f41949fd9758f07bd11c1a07ce953e0df21ad188 +size 21368 diff --git 
a/data/2025/2504_06xxx/2504.06196/images/f505e8244b73734017b072226795a6c6f33623dd337bfbd862ee723a1ca44b5e.jpg b/data/2025/2504_06xxx/2504.06196/images/f505e8244b73734017b072226795a6c6f33623dd337bfbd862ee723a1ca44b5e.jpg new file mode 100644 index 0000000000000000000000000000000000000000..ea10d91e0816436355ef50305dfc1fc60557818c --- /dev/null +++ b/data/2025/2504_06xxx/2504.06196/images/f505e8244b73734017b072226795a6c6f33623dd337bfbd862ee723a1ca44b5e.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e9b4245620c56ee51b413917ca6c1753b94954a8a110cb9d24ac9e2a086750ef +size 370118 diff --git a/data/2025/2504_06xxx/2504.06196/images/f86eb6e369466b452142128bdecbe6aacee09469cd8f80397c5d297b32679576.jpg b/data/2025/2504_06xxx/2504.06196/images/f86eb6e369466b452142128bdecbe6aacee09469cd8f80397c5d297b32679576.jpg new file mode 100644 index 0000000000000000000000000000000000000000..e267446a27686abd3047821b7ce93f2eb4ed4268 --- /dev/null +++ b/data/2025/2504_06xxx/2504.06196/images/f86eb6e369466b452142128bdecbe6aacee09469cd8f80397c5d297b32679576.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:810d36d707c8527cf6c43fced8272d17cd5e0b386efb6155ea9483789d380561 +size 232784 diff --git a/data/2025/2504_06xxx/2504.06196/images/fab9bc74db450dc8de75df386d1ccaa290832005e9a1245e9ad9844556fd03a3.jpg b/data/2025/2504_06xxx/2504.06196/images/fab9bc74db450dc8de75df386d1ccaa290832005e9a1245e9ad9844556fd03a3.jpg new file mode 100644 index 0000000000000000000000000000000000000000..9969979f133ba9b7771b82afd4b3ec8610cebb4e --- /dev/null +++ b/data/2025/2504_06xxx/2504.06196/images/fab9bc74db450dc8de75df386d1ccaa290832005e9a1245e9ad9844556fd03a3.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9583834291fd50c747161bd8f236796a0d3fff8a529c802d195a569aca0dce1a +size 16119 diff --git a/data/2025/2504_06xxx/2504.06196/layout.json b/data/2025/2504_06xxx/2504.06196/layout.json new file mode 100644 index 
0000000000000000000000000000000000000000..082efc311b8f02bcb86d95d4959dc9058707d04c --- /dev/null +++ b/data/2025/2504_06xxx/2504.06196/layout.json @@ -0,0 +1,27204 @@ +{ + "pdf_info": [ + { + "para_blocks": [ + { + "bbox": [ + 241, + 67, + 349, + 88 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 241, + 67, + 349, + 88 + ], + "spans": [ + { + "bbox": [ + 241, + 67, + 349, + 88 + ], + "type": "text", + "content": "TxGemma:" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 77, + 101, + 510, + 125 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 77, + 101, + 510, + 125 + ], + "spans": [ + { + "bbox": [ + 77, + 101, + 510, + 125 + ], + "type": "text", + "content": "Efficient and Agentic LLMs for Therapeutics" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 94, + 139, + 493, + 171 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 94, + 139, + 493, + 171 + ], + "spans": [ + { + "bbox": [ + 94, + 139, + 493, + 171 + ], + "type": "text", + "content": "Eric Wang*,†,1, Samuel Schmidgall*,1, Paul F. 
Jaeger1, Fan Zhang2, Rory Pilgrim2, Yossi Matias2, Joelle Barral1, David Fleet1 and Shekoofeh Azizi†,1" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 194, + 176, + 392, + 192 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 194, + 176, + 392, + 192 + ], + "spans": [ + { + "bbox": [ + 194, + 176, + 392, + 192 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 194, + 176, + 392, + 192 + ], + "type": "text", + "content": "Google DeepMind, " + }, + { + "bbox": [ + 194, + 176, + 392, + 192 + ], + "type": "inline_equation", + "content": "^{2}" + }, + { + "bbox": [ + 194, + 176, + 392, + 192 + ], + "type": "text", + "content": "Google Research" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 88, + 225, + 517, + 477 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 88, + 225, + 517, + 477 + ], + "spans": [ + { + "bbox": [ + 88, + 225, + 517, + 477 + ], + "type": "text", + "content": "Therapeutic development is a costly and high-risk endeavor that is often plagued by high failure rates. To address this, we introduce TxGemma, a suite of efficient, generalist large language models (LLMs) capable of therapeutic property prediction as well as interactive reasoning and explainability. Unlike task-specific models, TxGemma synthesizes information from diverse sources, enabling broad application across the therapeutic development pipeline. The suite includes 2B, 9B, and 27B parameter models, fine-tuned from Gemma-2 on a comprehensive dataset of small molecules, proteins, nucleic acids, diseases, and cell lines. Across 66 therapeutic development tasks, TxGemma achieved superior or comparable performance to the state-of-the-art generalist model on 64 (superior on 45), and against state-of-the-art specialist models on 50 (superior on 26). 
Fine-tuning TxGemma models on therapeutic downstream tasks, such as clinical trial adverse event prediction, requires less training data than fine-tuning base LLMs, making TxGemma suitable for data-limited applications. Beyond these predictive capabilities, TxGemma features conversational models that bridge the gap between general LLMs and specialized property predictors. These allow scientists to interact in natural language, provide mechanistic reasoning for predictions based on molecular structure, and engage in scientific discussions. Building on this, we further introduce Agentic-Tx, a generalist therapeutic agentic system powered by Gemini 2.5 that reasons, acts, manages diverse workflows, and acquires external domain knowledge. Agentic-Tx surpasses prior leading models on the Humanity's Last Exam benchmark (Chemistry & Biology) with " + }, + { + "bbox": [ + 88, + 225, + 517, + 477 + ], + "type": "inline_equation", + "content": "52.3\\%" + }, + { + "bbox": [ + 88, + 225, + 517, + 477 + ], + "type": "text", + "content": " relative improvement over o3-mini (high) and " + }, + { + "bbox": [ + 88, + 225, + 517, + 477 + ], + "type": "inline_equation", + "content": "26.7\\%" + }, + { + "bbox": [ + 88, + 225, + 517, + 477 + ], + "type": "text", + "content": " over o3-mini (high) on GPQA (Chemistry). 
On ChemBench, TxGemma excels with improvements of " + }, + { + "bbox": [ + 88, + 225, + 517, + 477 + ], + "type": "inline_equation", + "content": "6.3\\%" + }, + { + "bbox": [ + 88, + 225, + 517, + 477 + ], + "type": "text", + "content": " (ChemBench-Preference) and " + }, + { + "bbox": [ + 88, + 225, + 517, + 477 + ], + "type": "inline_equation", + "content": "2.4\\%" + }, + { + "bbox": [ + 88, + 225, + 517, + 477 + ], + "type": "text", + "content": " (ChemBench-Mini) over o3-mini (high), as well as " + }, + { + "bbox": [ + 88, + 225, + 517, + 477 + ], + "type": "inline_equation", + "content": "17.7\\%" + }, + { + "bbox": [ + 88, + 225, + 517, + 477 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 88, + 225, + 517, + 477 + ], + "type": "inline_equation", + "content": "5.6\\%" + }, + { + "bbox": [ + 88, + 225, + 517, + 477 + ], + "type": "text", + "content": " over o1, respectively. TxGemma's collection is released as open models, enabling researchers to adapt and validate it on their own diverse datasets, thus facilitating more challenging real-world applications." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 68, + 496, + 160, + 510 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 496, + 160, + 510 + ], + "spans": [ + { + "bbox": [ + 68, + 496, + 160, + 510 + ], + "type": "text", + "content": "1 Introduction" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 67, + 520, + 542, + 593 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 520, + 542, + 593 + ], + "spans": [ + { + "bbox": [ + 67, + 520, + 542, + 593 + ], + "type": "text", + "content": "The pharmaceutical industry faces significant challenges in bringing new therapeutics to market. High attrition rates and lengthy, costly development timelines [3, 4] necessitate innovative approaches to therapeutic development. 
Success requires a drug candidate to not only demonstrate efficacy but also possess favorable safety, metabolic stability, pharmacokinetic/pharmacodynamic properties and developability, among other characteristics. Determining these diverse characteristics often relies on a large array of complex and expensive experimental procedures, highlighting the need for more efficient methods." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 67, + 597, + 543, + 682 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 597, + 543, + 682 + ], + "spans": [ + { + "bbox": [ + 67, + 597, + 543, + 682 + ], + "type": "text", + "content": "Computational approaches, such as machine learning, are emerging as powerful tools to address these challenges. Leveraging predictive models trained on curated datasets allows researchers to prioritize promising candidates early in the development process, reducing reliance on costly experimental assays [5]. Publicly available databases of molecular properties and biological activity are crucial for training and validating these models. In this area, a major development was the curation of the Therapeutics Data Commons (TDC) [6, 7, 8], which contains datasets and benchmarks for many different tasks throughout the therapeutic development pipeline, ranging from early-stage target identification to late-stage clinical trial approval." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 67, + 685, + 543, + 723 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 685, + 543, + 723 + ], + "spans": [ + { + "bbox": [ + 67, + 685, + 543, + 723 + ], + "type": "text", + "content": "Recent advancements in large language models (LLMs) offer a compelling opportunity to leverage available datasets and address limitations in the therapeutic development process. 
LLMs have demonstrated the capacity to integrate and learn from diverse data sources across various domains, including scientific applications [9, 10," + } + ] + } + ], + "index": 9 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 14, + 219, + 36, + 555 + ], + "type": "aside_text", + "angle": 270, + "lines": [ + { + "bbox": [ + 14, + 219, + 36, + 555 + ], + "spans": [ + { + "bbox": [ + 14, + 219, + 36, + 555 + ], + "type": "text", + "content": "arXiv:2504.06196v1 [cs.AI] 8 Apr 2025" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 67, + 740, + 154, + 750 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 740, + 154, + 750 + ], + "spans": [ + { + "bbox": [ + 67, + 740, + 154, + 750 + ], + "type": "text", + "content": "* Equal contributions." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 67, + 750, + 295, + 761 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 750, + 295, + 761 + ], + "spans": [ + { + "bbox": [ + 67, + 750, + 295, + 761 + ], + "type": "text", + "content": "† Corresponding authors: {shekazizi, ericzwang}@google.com" + } + ] + } + ], + "index": 11 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 0 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 70, + 68, + 541, + 293 + ], + "blocks": [ + { + "bbox": [ + 70, + 68, + 541, + 293 + ], + "lines": [ + { + "bbox": [ + 70, + 68, + 541, + 293 + ], + "spans": [ + { + "bbox": [ + 70, + 68, + 541, + 293 + ], + "type": "image", + "image_path": "0203c8f019b173fb8d62c4ee904f1f418162cc6b09d58945c721893ac5ac6ad4.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + } + ], + "index": 0 + }, + { + "type": "image", + "bbox": [ + 71, + 305, + 294, + 406 + ], + "blocks": [ + { + "bbox": [ + 71, + 305, + 294, + 406 + ], + "lines": [ + { + "bbox": [ + 71, + 305, + 294, + 406 + ], + "spans": [ + { + "bbox": [ + 71, + 305, + 294, + 406 + ], + "type": "image", + "image_path": 
"6c5ae632ad262260986f1fe3efcfbe3a474c92834958a1bd7c97690782bbf10d.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + } + ], + "index": 1 + }, + { + "type": "image", + "bbox": [ + 71, + 426, + 294, + 529 + ], + "blocks": [ + { + "bbox": [ + 71, + 426, + 294, + 529 + ], + "lines": [ + { + "bbox": [ + 71, + 426, + 294, + 529 + ], + "spans": [ + { + "bbox": [ + 71, + 426, + 294, + 529 + ], + "type": "image", + "image_path": "778634aa5c8b1460d1e1005fbb3e9371ca11ba143cab37feb55f39531fd8284d.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 72, + 535, + 291, + 574 + ], + "blocks": [ + { + "bbox": [ + 72, + 535, + 291, + 574 + ], + "lines": [ + { + "bbox": [ + 72, + 535, + 291, + 574 + ], + "spans": [ + { + "bbox": [ + 72, + 535, + 291, + 574 + ], + "type": "image", + "image_path": "0114d9a00ab47d79177889b7773fe64b8b18a5fc2fdf408061a2db63ed40fe38.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 67, + 583, + 542, + 740 + ], + "lines": [ + { + "bbox": [ + 67, + 583, + 542, + 740 + ], + "spans": [ + { + "bbox": [ + 67, + 583, + 542, + 740 + ], + "type": "text", + "content": "Figure 1 | Overview of TxGemma. (top) All TxGemma variants are trained on diverse data sources of the Therapeutic Data Commons (TDC). TxGemma-Predict comes in three size variants (2B, 9B, and 27B) and is trained for high-performance predictions on a broad set of therapeutic development tasks. TxGemma-Chat features two variants (9B and 27B) and is trained on a combination of TDC data with general Gemma-2 instruction tuning data to retain conversational and reasoning capabilities. Agentic-Tx, a therapeutics-focused agentic system powered by Gemini 2.5, has access to 18 tools including TxGemma-Predict and TxGemma-Chat to collect external knowledge and manages complex tasks in either autonomous or interactive settings. 
(bottom-right) Absolute performance of Agentic-Tx compared to best-in-class models on three complex therapeutic-related reasoning benchmarks. The state-of-the-art (SOTA) values are obtained from [1, 2] and details are listed in Table 3. Dashed lines: L=lowest, M=mean, H=highest human scores. (bottom-left) Relative performance changes of TxGemma-Predict compared to the SOTA generalist model for each task type. The assignment of the 66 evaluated TDC tasks to task types is shown in Tables S.2 and S.3. The bottom bar chart shows a summary of results where TxGemma-Predict outperforms or nearly matches SOTA (light blue), and outperforms SOTA (darker blue)." + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_caption" + } + ], + "index": 3 + }, + { + "type": "image", + "bbox": [ + 316, + 307, + 538, + 376 + ], + "blocks": [ + { + "bbox": [ + 316, + 307, + 538, + 376 + ], + "lines": [ + { + "bbox": [ + 316, + 307, + 538, + 376 + ], + "spans": [ + { + "bbox": [ + 316, + 307, + 538, + 376 + ], + "type": "image", + "image_path": "edd756cd1d85fd738f6f39022b8b79d554ff5a528cce9f961383a0bd3ccf1307.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_body" + } + ], + "index": 4 + }, + { + "type": "image", + "bbox": [ + 317, + 380, + 538, + 442 + ], + "blocks": [ + { + "bbox": [ + 317, + 380, + 538, + 442 + ], + "lines": [ + { + "bbox": [ + 317, + 380, + 538, + 442 + ], + "spans": [ + { + "bbox": [ + 317, + 380, + 538, + 442 + ], + "type": "image", + "image_path": "45bd5bb59ecbb59ebf3c3bd02e1a6e2b8cdf58c01e28987f97b63a2162234f73.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_body" + } + ], + "index": 5 + }, + { + "type": "image", + "bbox": [ + 317, + 445, + 539, + 509 + ], + "blocks": [ + { + "bbox": [ + 317, + 445, + 539, + 509 + ], + "lines": [ + { + "bbox": [ + 317, + 445, + 539, + 509 + ], + "spans": [ + { + "bbox": [ + 317, + 445, + 539, + 509 + ], + "type": "image", + "image_path": 
"fab9bc74db450dc8de75df386d1ccaa290832005e9a1245e9ad9844556fd03a3.jpg" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_body" + } + ], + "index": 6 + }, + { + "type": "image", + "bbox": [ + 317, + 512, + 541, + 574 + ], + "blocks": [ + { + "bbox": [ + 317, + 512, + 541, + 574 + ], + "lines": [ + { + "bbox": [ + 317, + 512, + 541, + 574 + ], + "spans": [ + { + "bbox": [ + 317, + 512, + 541, + 574 + ], + "type": "image", + "image_path": "6c7190df8fb3e455a4678103a2125ca56783f3046dad39ad10081da4aeb70ed1.jpg" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_body" + } + ], + "index": 7 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 533, + 742, + 541, + 751 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 533, + 742, + 541, + 751 + ], + "spans": [ + { + "bbox": [ + 533, + 742, + 541, + 751 + ], + "type": "text", + "content": "2" + } + ] + } + ], + "index": 9 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 1 + }, + { + "para_blocks": [ + { + "bbox": [ + 67, + 71, + 542, + 143 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 71, + 542, + 143 + ], + "spans": [ + { + "bbox": [ + 67, + 71, + 542, + 143 + ], + "type": "text", + "content": "11]. Their potential to connect disparate aspects of drug development, such as chemical structure, biological activity, and clinical trial outcomes, is particularly exciting. In this context, we have previously introduced Tx-LLM, a LLM fine-tuned from a collection of question-answer instruction-tuning datasets based on TDC [12]. While promising, the model's lack of conversational capabilities prevented reasoning or user interaction, limiting its value for scientists who require a model that can understand complex queries and engage in nuanced discussions." 
+ } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 67, + 148, + 542, + 233 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 148, + 542, + 233 + ], + "spans": [ + { + "bbox": [ + 67, + 148, + 542, + 233 + ], + "type": "text", + "content": "In this work, we introduce TxGemma, a suite of efficient, generalist LLMs trained for therapeutics. Building on, but significantly extending, our previous work [12], TxGemma leverages LLMs to synthesize information from diverse sources. The suite includes 2B, 9B, and 27B parameter models, fine-tuned from Gemma-2 [13, 14] using a collection of therapeutic instruction-tuning datasets encompassing small molecules, proteins, nucleic acids, diseases, and cell lines. For the first time in therapeutic AI, TxGemma features conversational counterparts capable of reasoning and explanation, moving beyond black-box predictions to facilitate mechanistic understanding and scientific discussions. Our key contributions are as follows:" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 77, + 243, + 541, + 526 + ], + "type": "list", + "angle": 0, + "index": 6, + "blocks": [ + { + "bbox": [ + 77, + 243, + 541, + 340 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 77, + 243, + 541, + 340 + ], + "spans": [ + { + "bbox": [ + 77, + 243, + 541, + 340 + ], + "type": "text", + "content": "- Efficient Generalist Therapeutic LLMs: TxGemma represents a potential shift from task-specific AI to efficient generalist models in therapeutic development. These efficient LLMs (2B-27B parameters) offer a competitive alternative to specialized models, achieving strong performance across a broad range of predictive and generative tasks. Out of 66 therapeutic development tasks curated by TDC, TxGemma-Predict outperforms or nearly matches the state-of-the-art generalist model on 64 (outperforms on 45) and state-of-the-art specialist models on 50 (outperforms on 26). 
Additionally, fine-tuning TxGemma models on clinical trial adverse event prediction requires less data to achieve strong performance compared to base Gemma-2 models, an important advantage for data-limited fields." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 77, + 345, + 541, + 394 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 77, + 345, + 541, + 394 + ], + "spans": [ + { + "bbox": [ + 77, + 345, + 541, + 394 + ], + "type": "text", + "content": "- Explainable and Interactive Therapeutic Models: TxGemma-Chat introduces reasoning and explanation capabilities, bridging the gap between general LLMs and specialized property predictors. Scientists can interact with TxGemma-Chat using natural language, exploring complex questions, receive explanations for predictions (e.g., based on molecular structure), and engage in scientific discussions." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 77, + 399, + 541, + 460 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 77, + 399, + 541, + 460 + ], + "spans": [ + { + "bbox": [ + 77, + 399, + 541, + 460 + ], + "type": "text", + "content": "- Agentic Orchestration of Therapeutic Development Workflows: We further introduce Agentic-Tx, a therapeutics-focused agentic system powered by Gemini 2.5, demonstrating how TxGemma models can be integrated as tools. Equipped with 18 tools, Agentic-Tx solves complex, multi-step problems, achieving state-of-the-art results on reasoning-intensive chemistry and biology benchmarks, including Humanity's Last Exam [15] and ChemBench [1]." 
+ } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 77, + 465, + 541, + 526 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 77, + 465, + 541, + 526 + ], + "spans": [ + { + "bbox": [ + 77, + 465, + 541, + 526 + ], + "type": "text", + "content": "- Enabling Innovative Research with Open Models: Understanding the prevalence of proprietary data in therapeutic research, we release TxGemma models trained only on datasets with commercial licenses as open models to empower researchers to adapt and refine them on their own data. This facilitates validation and potential performance improvements tailored to their specific research needs, paving the way for therapy safety and efficacy in more challenging real-world therapeutic applications." + } + ] + } + ], + "index": 5 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 67, + 540, + 137, + 552 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 540, + 137, + 552 + ], + "spans": [ + { + "bbox": [ + 67, + 540, + 137, + 552 + ], + "type": "text", + "content": "2 Methods" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 67, + 566, + 118, + 577 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 566, + 118, + 577 + ], + "spans": [ + { + "bbox": [ + 67, + 566, + 118, + 577 + ], + "type": "text", + "content": "2.1 Data" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 67, + 586, + 542, + 696 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 586, + 542, + 696 + ], + "spans": [ + { + "bbox": [ + 67, + 586, + 542, + 696 + ], + "type": "text", + "content": "Therapeutic Data Commons (TDC) We leverage the Therapeutic Data Commons (TDC) [7, 6], a comprehensive collection of 66 AI-ready datasets spanning the drug discovery and development pipeline. TDC includes over 15 million datapoints across various biomedical entities and encompasses single-instance prediction, multi-instance prediction, and generation tasks [7]. 
We focus on TDC tasks relevant to drug discovery, incorporating diverse therapeutic representations: SMILES strings (small molecules), amino acid sequences (proteins and peptides, including specialized representations for MHC molecules and T-cell receptors), nucleotide sequences (nucleic acids), and natural language text (disease/cell line names) (see Table S.6 for examples). Many tasks combine multiple representations. (See Table S.1 for task inclusion criteria and Tables S.7 and S.8 for biological contexts of certain tasks.)" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 67, + 699, + 541, + 724 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 699, + 541, + 724 + ], + "spans": [ + { + "bbox": [ + 67, + 699, + 541, + 724 + ], + "type": "text", + "content": "Therapeutic Instruction-Tuning Following Chaves et al. [12], we transform the raw TDC data into an instruction-tuning format suitable for LLMs. Each data point is formatted as a prompt:" + } + ] + } + ], + "index": 10 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 533, + 742, + 541, + 751 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 533, + 742, + 541, + 751 + ], + "spans": [ + { + "bbox": [ + 533, + 742, + 541, + 751 + ], + "type": "text", + "content": "3" + } + ] + } + ], + "index": 11 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 2 + }, + { + "para_blocks": [ + { + "bbox": [ + 83, + 72, + 541, + 154 + ], + "type": "list", + "angle": 0, + "index": 4, + "blocks": [ + { + "bbox": [ + 83, + 72, + 271, + 82 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 83, + 72, + 271, + 82 + ], + "spans": [ + { + "bbox": [ + 83, + 72, + 271, + 82 + ], + "type": "text", + "content": "- Instruction: Briefly describes the task." 
+ } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 83, + 83, + 541, + 105 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 83, + 83, + 541, + 105 + ], + "spans": [ + { + "bbox": [ + 83, + 83, + 541, + 105 + ], + "type": "text", + "content": "- Context: Provides 2-3 sentences of relevant biochemical background, derived from TDC descriptions and literature." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 83, + 107, + 541, + 131 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 83, + 107, + 541, + 131 + ], + "spans": [ + { + "bbox": [ + 83, + 107, + 541, + 131 + ], + "type": "text", + "content": "- Question: Queries a specific therapeutic property, incorporating textual representations of therapeutics and/or targets (e.g., \"Does the following molecule cross the blood-brain barrier? \")." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 83, + 132, + 541, + 154 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 83, + 132, + 541, + 154 + ], + "spans": [ + { + "bbox": [ + 83, + 132, + 541, + 154 + ], + "type": "text", + "content": "- Answer: Formatted as (A)/(B) for binary classification, a binned continuous value for regression, or a SMILES string for generation." + } + ] + } + ], + "index": 3 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 67, + 167, + 542, + 204 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 167, + 542, + 204 + ], + "spans": [ + { + "bbox": [ + 67, + 167, + 542, + 204 + ], + "type": "text", + "content": "This process yields 7,080,338 training, 956,575 validation, and 1,917,297 test data points (Figure S.1, Tables S.2 and S.3). Data splits closely follow TDC's recommended methodologies (random, scaffold, cold-start, combination, temporal) (Table S.2, Table S.3). Detailed task descriptions are in Tables S.4 and S.5." 
+ } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 67, + 208, + 542, + 269 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 208, + 542, + 269 + ], + "spans": [ + { + "bbox": [ + 67, + 208, + 542, + 269 + ], + "type": "text", + "content": "We employ a few-shot prompting strategy to promote in-context learning [16], using a blend of " + }, + { + "bbox": [ + 67, + 208, + 542, + 269 + ], + "type": "inline_equation", + "content": "70\\%" + }, + { + "bbox": [ + 67, + 208, + 542, + 269 + ], + "type": "text", + "content": " zero-shot and " + }, + { + "bbox": [ + 67, + 208, + 542, + 269 + ], + "type": "inline_equation", + "content": "30\\%" + }, + { + "bbox": [ + 67, + 208, + 542, + 269 + ], + "type": "text", + "content": " few-shot prompts [17, 12]. For few-shot prompts, we randomly sample examples from the training set (Table S.9), as intra-training set similarity is higher than training-test set similarity (Figure S.2). The number of examples is uniformly selected between 1 and 10 so that few-shot prompting is robust to the number of examples during evaluation." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 67, + 278, + 143, + 292 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 278, + 143, + 292 + ], + "spans": [ + { + "bbox": [ + 67, + 278, + 143, + 292 + ], + "type": "text", + "content": "2.2 Modeling" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 67, + 299, + 542, + 361 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 299, + 542, + 361 + ], + "spans": [ + { + "bbox": [ + 67, + 299, + 542, + 361 + ], + "type": "text", + "content": "Base LLM. TxGemma is built upon the Gemma-2 [14] family of lightweight, state-of-the-art open LLMs. Gemma-2 models utilize a decoder-only transformer architecture, incorporating architectural modifications such as interleaved local-global attention and group-query attention, and are trained using Gemini technology [18]. 
We utilize Gemma-2 base models at 2B, 9B, and 27B parameters. 2B and 9B Gemma-2 models were initially trained via knowledge distillation [14]." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 67, + 363, + 542, + 460 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 363, + 542, + 460 + ], + "spans": [ + { + "bbox": [ + 67, + 363, + 542, + 460 + ], + "type": "text", + "content": "Predictive Model Fine-Tuning. We fine-tune the 2B, 9B, and 27B Gemma-2 base models on the therapeutic instruction-tuning data derived from TDC, creating TxGemma-2B-Predict, TxGemma-9B-Predict, and TxGemma-27B-Predict, respectively. Training was performed across all TDC tasks, with mixture ratios proportional to the number of training data points (see Tables S.2 and S.3 for data distribution). This encompassed all approximately 7 million training examples, comprising 3.3 million from regression/generation and 3.7 million from binary classification tasks. Fine-tuning proceeded for 67B tokens (12 epochs) using 256 TPUv4 chips with 8-way data replication, 4-way sequence sharding, and 4-way model sharding. In this work, \"TxGemma\" generally refers to the generalist, predictive TxGemma-27B-Predict." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 67, + 464, + 542, + 525 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 464, + 542, + 525 + ], + "spans": [ + { + "bbox": [ + 67, + 464, + 542, + 525 + ], + "type": "text", + "content": "Conversational Model Fine-Tuning. We also trained conversational counterparts, TxGemma-9B-Chat and TxGemma-27B-Chat, by supplementing the therapeutic instruction-tuning data with general instruction-tuning data, as detailed in the Gemma-2 report [14]. 
The training data mixture comprised " + }, + { + "bbox": [ + 67, + 464, + 542, + 525 + ], + "type": "inline_equation", + "content": "30\\%" + }, + { + "bbox": [ + 67, + 464, + 542, + 525 + ], + "type": "text", + "content": " therapeutic data and " + }, + { + "bbox": [ + 67, + 464, + 542, + 525 + ], + "type": "inline_equation", + "content": "70\\%" + }, + { + "bbox": [ + 67, + 464, + 542, + 525 + ], + "type": "text", + "content": " general instruction-tuning data. Conversational models were trained using the same number of tokens and TPU configuration as the predictive models." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 67, + 534, + 282, + 548 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 534, + 282, + 548 + ], + "spans": [ + { + "bbox": [ + 67, + 534, + 282, + 548 + ], + "type": "text", + "content": "2.3 Evaluating Predictive Performance" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 67, + 555, + 542, + 640 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 555, + 542, + 640 + ], + "spans": [ + { + "bbox": [ + 67, + 555, + 542, + 640 + ], + "type": "text", + "content": "Prompting strategy For test set evaluations, we use 10-shot prompting, selecting exemplars from the nearest neighbors within the combined training and validation set (not the test set), as detailed in Table S.9. Nearest neighbors were determined using different methods based on molecule type. For small molecules, we used RDKit [19] to generate Morgan fingerprints (radius 2 and size 2048), representing molecular substructures as binary vectors. Subsequently, we used Chemfp [20] to compute Tanimoto similarities, which quantify fingerprint overlap. For amino acid and nucleotide sequences, nearest neighbors were defined by percent sequence identity, determined through multiple sequence alignments performed with Clustal Omega [21]." 
+ } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 67, + 644, + 542, + 717 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 644, + 542, + 717 + ], + "spans": [ + { + "bbox": [ + 67, + 644, + 542, + 717 + ], + "type": "text", + "content": "Performance Metrics and Statistical Tests We assess performance using the preferred metrics for each task, as defined by TDC [7] and used by previous models. Binary classification tasks are assessed with area under the receiver operating characteristic curve (AUROC), area under the precision-recall curve (AUPRC), and accuracy. Regression tasks use Spearman's and Pearson correlation coefficients, mean absolute error (MAE), and mean squared error (MSE). The USPTO generation task uses \"set accuracy,\" scoring 1 for perfect overlap between predicted and true reactant sets, and 0 otherwise. Bootstrapped metrics are calculated" + } + ] + } + ], + "index": 13 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 533, + 742, + 541, + 751 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 533, + 742, + 541, + 751 + ], + "spans": [ + { + "bbox": [ + 533, + 742, + 541, + 751 + ], + "type": "text", + "content": "4" + } + ] + } + ], + "index": 14 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 3 + }, + { + "para_blocks": [ + { + "bbox": [ + 95, + 79, + 286, + 106 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 95, + 79, + 286, + 106 + ], + "spans": [ + { + "bbox": [ + 95, + 79, + 286, + 106 + ], + "type": "text", + "content": "Prompt: Imagine an early virtual screening campaign setting. Which of the following two candidates would you prefer for further development?" 
+ } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 96, + 115, + 162, + 123 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 96, + 115, + 162, + 123 + ], + "spans": [ + { + "bbox": [ + 96, + 115, + 162, + 123 + ], + "type": "text", + "content": "" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 96, + 128, + 163, + 135 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 96, + 128, + 163, + 135 + ], + "spans": [ + { + "bbox": [ + 96, + 128, + 163, + 135 + ], + "type": "text", + "content": "" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 328, + 79, + 485, + 88 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 328, + 79, + 485, + 88 + ], + "spans": [ + { + "bbox": [ + 328, + 79, + 485, + 88 + ], + "type": "text", + "content": "Agent " + }, + { + "bbox": [ + 328, + 79, + 485, + 88 + ], + "type": "inline_equation", + "content": "\\rightarrow" + }, + { + "bbox": [ + 328, + 79, + 485, + 88 + ], + "type": "text", + "content": " TxGemma-ClinTox: Is the following toxic?" 
+ } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 329, + 89, + 395, + 96 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 329, + 89, + 395, + 96 + ], + "spans": [ + { + "bbox": [ + 329, + 89, + 395, + 96 + ], + "type": "text", + "content": "" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 329, + 98, + 407, + 106 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 329, + 98, + 407, + 106 + ], + "spans": [ + { + "bbox": [ + 329, + 98, + 407, + 106 + ], + "type": "text", + "content": "TxGemma ClinTox: Toxic" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 328, + 114, + 487, + 122 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 328, + 114, + 487, + 122 + ], + "spans": [ + { + "bbox": [ + 328, + 114, + 487, + 122 + ], + "type": "text", + "content": "Agent " + }, + { + "bbox": [ + 328, + 114, + 487, + 122 + ], + "type": "inline_equation", + "content": "\\rightarrow" + }, + { + "bbox": [ + 328, + 114, + 487, + 122 + ], + "type": "text", + "content": " TxGemma-ClinTox: Is the following toxic?" 
+ } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 329, + 123, + 395, + 131 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 329, + 123, + 395, + 131 + ], + "spans": [ + { + "bbox": [ + 329, + 123, + 395, + 131 + ], + "type": "text", + "content": "" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 329, + 133, + 422, + 140 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 329, + 133, + 422, + 140 + ], + "spans": [ + { + "bbox": [ + 329, + 133, + 422, + 140 + ], + "type": "text", + "content": "TxGemma ClinTox: Non-toxic" + } + ] + } + ], + "index": 8 + }, + { + "type": "image", + "bbox": [ + 100, + 150, + 510, + 277 + ], + "blocks": [ + { + "bbox": [ + 100, + 150, + 510, + 277 + ], + "lines": [ + { + "bbox": [ + 100, + 150, + 510, + 277 + ], + "spans": [ + { + "bbox": [ + 100, + 150, + 510, + 277 + ], + "type": "image", + "image_path": "a3c869088761c5ea30145738fb0e7de8fb845000d423b65d65ef9ab5098f21ca.jpg" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 67, + 381, + 542, + 477 + ], + "lines": [ + { + "bbox": [ + 67, + 381, + 542, + 477 + ], + "spans": [ + { + "bbox": [ + 67, + 381, + 542, + 477 + ], + "type": "text", + "content": "Figure 2 | Example workflow of agentic planning and execution with Agentic-Tx. Agentic-Tx uses the ReAct framework [22] to interleave thought with tool-usage. When a user poses a query, Agentic-Tx checks whether the query structure matches any defined tool trigger. If so, the query is routed to the corresponding tool, which (i) parses the request, (ii) invokes specialized logic, and (iii) returns a structured answer to the agent. The agent then composes a user-facing response. This adaptive tool-use mechanism is especially helpful for tasks that require external references, chemical data transformations, or precise chemical information, areas where self-contained LLMs often hallucinate. 
In the displayed example, Agentic-Tx uses two tools to solve a complex therapeutic task: TxGemma-Chat and the clinical toxicity prediction tool based on TxGemma-Predict." + } + ] + } + ], + "index": 17, + "angle": 0, + "type": "image_caption" + } + ], + "index": 9 + }, + { + "bbox": [ + 95, + 286, + 286, + 311 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 95, + 286, + 286, + 311 + ], + "spans": [ + { + "bbox": [ + 95, + 286, + 286, + 311 + ], + "type": "text", + "content": "Agent " + }, + { + "bbox": [ + 95, + 286, + 286, + 311 + ], + "type": "inline_equation", + "content": "\\rightarrow" + }, + { + "bbox": [ + 95, + 286, + 286, + 311 + ], + "type": "text", + "content": " TxGemma-Chat: Given two drug candidates, what factors would influence your decision to prioritize one over the other in an early virtual screening campaign?" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 95, + 315, + 282, + 332 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 95, + 315, + 282, + 332 + ], + "spans": [ + { + "bbox": [ + 95, + 315, + 282, + 332 + ], + "type": "text", + "content": "TxGemma-Chat: Investigate whether the drugs would pass through a clinical trial, based on properties such as toxicity." 
+ } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 375, + 304, + 507, + 313 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 375, + 304, + 507, + 313 + ], + "spans": [ + { + "bbox": [ + 375, + 304, + 507, + 313 + ], + "type": "text", + "content": "Agent: Final answer: " + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 91, + 351, + 187, + 362 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 91, + 351, + 187, + 362 + ], + "spans": [ + { + "bbox": [ + 91, + 351, + 187, + 362 + ], + "type": "text", + "content": "Natural Language Input" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 217, + 351, + 283, + 361 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 217, + 351, + 283, + 361 + ], + "spans": [ + { + "bbox": [ + 217, + 351, + 283, + 361 + ], + "type": "text", + "content": "Reason + Action" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 345, + 351, + 383, + 360 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 345, + 351, + 383, + 360 + ], + "spans": [ + { + "bbox": [ + 345, + 351, + 383, + 360 + ], + "type": "text", + "content": "Tool-use" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 445, + 351, + 498, + 360 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 445, + 351, + 498, + 360 + ], + "spans": [ + { + "bbox": [ + 445, + 351, + 498, + 360 + ], + "type": "text", + "content": "Final Answer" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 67, + 505, + 541, + 530 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 505, + 541, + 530 + ], + "spans": [ + { + "bbox": [ + 67, + 505, + 541, + 530 + ], + "type": "text", + "content": "using 1000 samples. To compare overall performance between two models across all TDC tasks, we use the non-parametric Wilcoxon signed-rank test and report the corresponding p-value (details in Appendix C.1)." 
+ } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 67, + 539, + 177, + 552 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 539, + 177, + 552 + ], + "spans": [ + { + "bbox": [ + 67, + 539, + 177, + 552 + ], + "type": "text", + "content": "2.4 Agentic System" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 67, + 559, + 542, + 632 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 559, + 542, + 632 + ], + "spans": [ + { + "bbox": [ + 67, + 559, + 542, + 632 + ], + "type": "text", + "content": "One limitation of LLMs for discovery is that, while their prediction capabilities are powerful, they do not have access to up-to-date external knowledge, such as research articles or domain-specific prediction models. These knowledge cut-offs prevent the model from answering questions outside of its training scope. Additionally, some questions involve multiple reasoning steps to solve, for example, the question \"What structural modifications could improve the potency of the given drug?\" requires iteratively searching the drug's structural space and then prompting TxGemma to predict potency." + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 67, + 636, + 543, + 673 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 636, + 543, + 673 + ], + "spans": [ + { + "bbox": [ + 67, + 636, + 543, + 673 + ], + "type": "text", + "content": "Agentic-Tx, our therapeutics-focused agentic system powered by Gemini 2.5 [18], extends TxGemma's capabilities by orchestrating such complex workflows. Agentic-Tx employs a modular, tool-usage paradigm, in contrast to TxGemma's direct generation of solutions." 
+ } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 67, + 677, + 542, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 677, + 542, + 713 + ], + "spans": [ + { + "bbox": [ + 67, + 677, + 542, + 713 + ], + "type": "text", + "content": "Reasoning and Action Framework Agentic-Tx utilizes the ReAct framework [22], allowing it to interleave reasoning steps (\"thoughts\") with actions (tool use). The agentic system receives a task or question and iteratively takes actions based on its current context. Each action typically involves using a tool, which" + } + ] + } + ], + "index": 22 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 533, + 742, + 541, + 751 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 533, + 742, + 541, + 751 + ], + "spans": [ + { + "bbox": [ + 533, + 742, + 541, + 751 + ], + "type": "text", + "content": "5" + } + ] + } + ], + "index": 23 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 4 + }, + { + "para_blocks": [ + { + "bbox": [ + 67, + 72, + 542, + 144 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 72, + 542, + 144 + ], + "spans": [ + { + "bbox": [ + 67, + 72, + 542, + 144 + ], + "type": "text", + "content": "returns an observation. Key to ReAct is this iterative process of observing, reasoning, and acting, allowing Agentic-Tx to dynamically adjust its approach based on the information it gathers. Because tools may return large outputs, we summarize these observations in order to maintain a concise and relevant context. This iterative process of observing, reasoning, acting, and updating its context allows Agentic-Tx to dynamically adjust its approach and gather the necessary information to answer the initial query. Finally, Agentic-Tx integrates the gathered information and formulates a user-friendly response." 
+ } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 67, + 148, + 542, + 172 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 148, + 542, + 172 + ], + "spans": [ + { + "bbox": [ + 67, + 148, + 542, + 172 + ], + "type": "text", + "content": "Agentic Tools Agentic-Tx is equipped with 18 tools across four categories (detailed tool descriptions are in Table S.12). They can be broadly categorized as:" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 79, + 184, + 541, + 316 + ], + "type": "list", + "angle": 0, + "index": 6, + "blocks": [ + { + "bbox": [ + 80, + 184, + 541, + 243 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 80, + 184, + 541, + 243 + ], + "spans": [ + { + "bbox": [ + 80, + 184, + 541, + 243 + ], + "type": "text", + "content": "1. TxGemma-based Tools: These provide access to TxGemma's capabilities. The Chat tool enables interaction with TxGemma-27B-Chat. The ClinicalTox and ToxCast tools utilize TxGemma-27B-Predict for toxicity predictions. " + }, + { + "bbox": [ + 80, + 184, + 541, + 243 + ], + "type": "inline_equation", + "content": "IC_{50}" + }, + { + "bbox": [ + 80, + 184, + 541, + 243 + ], + "type": "text", + "content": " returns the predicted normalized " + }, + { + "bbox": [ + 80, + 184, + 541, + 243 + ], + "type": "inline_equation", + "content": "IC_{50}" + }, + { + "bbox": [ + 80, + 184, + 541, + 243 + ], + "type": "text", + "content": " between a drug and protein, the Mutagenicity tool predicts drug mutagenicity, and the Phase1 Trial tool predicts whether a drug would pass a Phase 1 clinical trial." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 79, + 245, + 541, + 267 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 79, + 245, + 541, + 267 + ], + "spans": [ + { + "bbox": [ + 79, + 245, + 541, + 267 + ], + "type": "text", + "content": "2. General Tools: These query external knowledge resources, including PubMed, Wikipedia, and the web." 
+ } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 79, + 269, + 541, + 293 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 79, + 269, + 541, + 293 + ], + "spans": [ + { + "bbox": [ + 79, + 269, + 541, + 293 + ], + "type": "text", + "content": "3. Molecule Tools: These leverage domain-specific libraries for tasks such as retrieving molecular descriptors (e.g., from PubChem) and performing chemical structure conversions." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 80, + 293, + 541, + 316 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 80, + 293, + 541, + 316 + ], + "spans": [ + { + "bbox": [ + 80, + 293, + 541, + 316 + ], + "type": "text", + "content": "4. Gene & Protein Tools: These leverage domain-specific libraries for tasks involving genes or proteins, such as retrieving gene descriptions and protein descriptions (e.g., from the NCBI Gene database)." + } + ] + } + ], + "index": 5 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 67, + 332, + 129, + 344 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 332, + 129, + 344 + ], + "spans": [ + { + "bbox": [ + 67, + 332, + 129, + 344 + ], + "type": "text", + "content": "3 Results" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 67, + 358, + 281, + 370 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 358, + 281, + 370 + ], + "spans": [ + { + "bbox": [ + 67, + 358, + 281, + 370 + ], + "type": "text", + "content": "3.1 TxGemma Predictive Performance" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 67, + 380, + 350, + 392 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 380, + 350, + 392 + ], + "spans": [ + { + "bbox": [ + 67, + 380, + 350, + 392 + ], + "type": "text", + "content": "3.1.1 Comparison with best-in-class therapeutic models" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 67, + 401, + 543, + 534 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, 
+ 401, + 543, + 534 + ], + "spans": [ + { + "bbox": [ + 67, + 401, + 543, + 534 + ], + "type": "text", + "content": "To provide a comprehensive evaluation of our models' predictive capabilities, we benchmark against both specialist and generalist baselines. For specialist comparisons, we define best-in-class performance metrics for each task using previous models. Specifically, we utilize TDC leaderboard scores for tasks where available (ADMET, DrugCombo, DTI DG). For remaining tasks, values are reported from a literature review and are detailed in Tables S.13 and S.14. These specialist performance values align with those reported in Chaves et al. [12]. Additionally, we compare our models against three prominent therapeutic generalist and multi-task models: Tx-LLM [12], LlaSMol [23], and MolE [24]. Tx-LLM, with its two size-variants S and M, shares similar training data to our approach enabling a direct comparison across all tasks. LlaSMol a suite of generalist models built upon fine-tuned open-source LLMs trained for small-molecule applications [23]. Similarly, MolE was developed as a graph-based multi-task foundation model for small molecules. LlaSMol and MolE, specialized for small molecules, offer strong baselines for small molecule tasks." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 67, + 537, + 543, + 635 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 537, + 543, + 635 + ], + "spans": [ + { + "bbox": [ + 67, + 537, + 543, + 635 + ], + "type": "text", + "content": "TxGemma shows improved performance compared to therapeutic generalist models In Figure 3, we compare the performance of TxGemma-27B-Predict to the two existing models in the Tx-LLM [12] family, Tx-LLM M and Tx-LLM S, built over PaLM-2 on TDC tasks. TxGemma-27B-Predict surpasses Tx-LLM M on 45 out of 66 tasks, while underperforming on 21. In addition, it outperforms Tx-LLM S on 62 and underperforms Tx-LLM S on only 4. 
Aggregating performance over task, we observe a statistically significant improvement of TxGemma-27B-Predict over Tx-LLM models " + }, + { + "bbox": [ + 67, + 537, + 543, + 635 + ], + "type": "inline_equation", + "content": "(p = 0.003" + }, + { + "bbox": [ + 67, + 537, + 543, + 635 + ], + "type": "text", + "content": ", Wilcoxon signed-rank test). These results demonstrate that TxGemma provides a highly competitive alternative to its predecessor with improved functionality at a substantially reduced model size." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 67, + 639, + 543, + 723 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 639, + 543, + 723 + ], + "spans": [ + { + "bbox": [ + 67, + 639, + 543, + 723 + ], + "type": "text", + "content": "TxGemma is competitive with specialist therapeutic models Figure 4 and Figure S.4 compare TxGemma's performance with best-in-class specialist model across tasks containing various combinations of SMILES, amino acid, nucleotide, and text inputs. In a comparison with specialist best-in-class models, TxGemma-27B-Predict outperforms the state-of-the-art (SOTA) on 26 and performs near SOTA on 50. This is a substantial improvement over its predecessor Tx-LLM M, which outperformed SOTA on 22 tasks and near SOTA on 43. These results demonstrate the improved capabilities of TxGemma-27B-Predict and its competitiveness with current specialist models designed for specific tasks and therapeutic feature types." 
+ } + ] + } + ], + "index": 12 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 533, + 742, + 541, + 751 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 533, + 742, + 541, + 751 + ], + "spans": [ + { + "bbox": [ + 533, + 742, + 541, + 751 + ], + "type": "text", + "content": "6" + } + ] + } + ], + "index": 13 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 5 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 70, + 69, + 541, + 395 + ], + "blocks": [ + { + "bbox": [ + 70, + 69, + 541, + 395 + ], + "lines": [ + { + "bbox": [ + 70, + 69, + 541, + 395 + ], + "spans": [ + { + "bbox": [ + 70, + 69, + 541, + 395 + ], + "type": "image", + "image_path": "e02f5ed44e79ec81692d41c9c6fe4b312e200e4239d7e952545b07549038790e.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 67, + 413, + 544, + 510 + ], + "lines": [ + { + "bbox": [ + 67, + 413, + 544, + 510 + ], + "spans": [ + { + "bbox": [ + 67, + 413, + 544, + 510 + ], + "type": "text", + "content": "Figure 3 | Comparison of TxGemma-Predict's performance with therapeutic generalist models. (top) relative performance improvement of TxGemma-27B-Predict in comparison to Tx-LLM S. TxGemma-27B-Predict outperforms Tx-LLM S on 62 and underperforms on only 4. (bottom) relative performance improvement of TxGemma-27B-Predict in comparison to Tx-LLM M. TxGemma-27B-Predict outperforms Tx-LLM M on 45 out of 66 tasks, while underperforming on 21. When aggregating performance over task, we observe a net improvement of TxGemma-27B-Predict over Tx-LLM models, with a statistically significant difference " + }, + { + "bbox": [ + 67, + 413, + 544, + 510 + ], + "type": "inline_equation", + "content": "(p = 0.003" + }, + { + "bbox": [ + 67, + 413, + 544, + 510 + ], + "type": "text", + "content": ", Wilcoxon signed-rank test). These results establish TxGemma-27B-Predict as a competitive and functionally enhanced alternative at practical model sizes. 
Values for each task can be found in Tables S.15 and S.16." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "bbox": [ + 67, + 536, + 544, + 660 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 536, + 544, + 660 + ], + "spans": [ + { + "bbox": [ + 67, + 536, + 544, + 660 + ], + "type": "text", + "content": "TxGemma performs similarly to multi-task models specialized for small molecules Table 1 and Figure S.6 compare the predictive performance of TxGemma-27B-Predict with MolE, a graph-based multi-task foundation model for small molecules. MolE performs within the " + }, + { + "bbox": [ + 67, + 536, + 544, + 660 + ], + "type": "inline_equation", + "content": "95\\%" + }, + { + "bbox": [ + 67, + 536, + 544, + 660 + ], + "type": "text", + "content": " CIs of TxGemma-27B-Predict for 15 out of 22 tasks. Furthermore, both TxGemma-27B-Predict and TxGemma-9B-Predict outperform LlaSMolMistral (7B), the top performing model from the LlaSMol suite, on 2 of 5 shared tasks and within " + }, + { + "bbox": [ + 67, + 536, + 544, + 660 + ], + "type": "inline_equation", + "content": "95\\%" + }, + { + "bbox": [ + 67, + 536, + 544, + 660 + ], + "type": "text", + "content": " CIs on 2 additional tasks (Table 2 and Figure S.5). All metrics for MolE and LlaSMol are reported from Mendez-Lucio et al. [24] and Yu et al. [23]. Given their specialization in small-molecule tasks, LlaSMol and MolE provide strong baselines for evaluating generalist models. Notably, TxGemma, a generalist model encompassing diverse drug types and many different tasks, achieves competitive performance with these dedicated models designed for a narrower range of small-molecule tasks." 
+ } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 67, + 666, + 302, + 681 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 666, + 302, + 681 + ], + "spans": [ + { + "bbox": [ + 67, + 666, + 302, + 681 + ], + "type": "text", + "content": "3.2 TxGemma Conversational Capabilities" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 67, + 688, + 542, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 688, + 542, + 713 + ], + "spans": [ + { + "bbox": [ + 67, + 688, + 542, + 713 + ], + "type": "text", + "content": "While TxGemma-27B-Predict performs well on prediction tasks, training solely on instruction tuning data for therapeutic properties limits its conversational capacity. TxGemma-27B-Predict can engage in general" + } + ] + } + ], + "index": 4 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 533, + 742, + 541, + 751 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 533, + 742, + 541, + 751 + ], + "spans": [ + { + "bbox": [ + 533, + 742, + 541, + 751 + ], + "type": "text", + "content": "7" + } + ] + } + ], + "index": 5 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 6 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 97, + 70, + 233, + 205 + ], + "blocks": [ + { + "bbox": [ + 97, + 70, + 233, + 205 + ], + "lines": [ + { + "bbox": [ + 97, + 70, + 233, + 205 + ], + "spans": [ + { + "bbox": [ + 97, + 70, + 233, + 205 + ], + "type": "image", + "image_path": "ebfa122c4e8e4aaa8d9e1067f57d4762781618f9e07826cee01379d3677e3b2b.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + } + ], + "index": 0 + }, + { + "type": "image", + "bbox": [ + 238, + 70, + 375, + 205 + ], + "blocks": [ + { + "bbox": [ + 238, + 70, + 375, + 205 + ], + "lines": [ + { + "bbox": [ + 238, + 70, + 375, + 205 + ], + "spans": [ + { + "bbox": [ + 238, + 70, + 375, + 205 + ], + "type": "image", + "image_path": 
"9b152fe3c52d378f2d027185f60d5bd699d4d295bd539075317ae94dfc9ac7a1.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + } + ], + "index": 1 + }, + { + "type": "image", + "bbox": [ + 377, + 71, + 515, + 205 + ], + "blocks": [ + { + "bbox": [ + 377, + 71, + 515, + 205 + ], + "lines": [ + { + "bbox": [ + 377, + 71, + 515, + 205 + ], + "spans": [ + { + "bbox": [ + 377, + 71, + 515, + 205 + ], + "type": "image", + "image_path": "2c3dedc41b7c9e61f949673cd15587fced5ab7c9ed1cd38b27f8cc2f630441e6.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 96, + 209, + 233, + 343 + ], + "blocks": [ + { + "bbox": [ + 96, + 209, + 233, + 343 + ], + "lines": [ + { + "bbox": [ + 96, + 209, + 233, + 343 + ], + "spans": [ + { + "bbox": [ + 96, + 209, + 233, + 343 + ], + "type": "image", + "image_path": "1996f43f70147e885c9ca69f311159a6643604936abffe7afc9aded5bd4946cc.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + } + ], + "index": 3 + }, + { + "type": "image", + "bbox": [ + 238, + 209, + 375, + 342 + ], + "blocks": [ + { + "bbox": [ + 238, + 209, + 375, + 342 + ], + "lines": [ + { + "bbox": [ + 238, + 209, + 375, + 342 + ], + "spans": [ + { + "bbox": [ + 238, + 209, + 375, + 342 + ], + "type": "image", + "image_path": "3cd0a375ee3d7de3a86068d0c489eab4f7df74f4ad4e1134db7d492a2c938c19.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 400, + 375, + 445, + 384 + ], + "lines": [ + { + "bbox": [ + 400, + 375, + 445, + 384 + ], + "spans": [ + { + "bbox": [ + 400, + 375, + 445, + 384 + ], + "type": "text", + "content": "SMILES" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_footnote" + } + ], + "index": 4 + }, + { + "type": "image", + "bbox": [ + 377, + 209, + 515, + 343 + ], + "blocks": [ + { + "bbox": [ + 377, + 209, + 515, + 343 + ], + "lines": [ + { + "bbox": [ + 377, + 209, + 515, + 343 + ], + 
"spans": [ + { + "bbox": [ + 377, + 209, + 515, + 343 + ], + "type": "image", + "image_path": "5019a41ee3d7b9b09cfa6b183c6cca835c79d3fc3f8b7e470d8b3d39c395d574.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 400, + 396, + 469, + 403 + ], + "lines": [ + { + "bbox": [ + 400, + 396, + 469, + 403 + ], + "spans": [ + { + "bbox": [ + 400, + 396, + 469, + 403 + ], + "type": "text", + "content": "SMILES + Text" + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_footnote" + } + ], + "index": 5 + }, + { + "type": "image", + "bbox": [ + 92, + 348, + 231, + 482 + ], + "blocks": [ + { + "bbox": [ + 92, + 348, + 231, + 482 + ], + "lines": [ + { + "bbox": [ + 92, + 348, + 231, + 482 + ], + "spans": [ + { + "bbox": [ + 92, + 348, + 231, + 482 + ], + "type": "image", + "image_path": "16baf39adc18c0e4e4aa59b45fb1b03adc1282067ff3e5c3d12ac88c14a15786.jpg" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 67, + 495, + 543, + 605 + ], + "lines": [ + { + "bbox": [ + 67, + 495, + 543, + 605 + ], + "spans": [ + { + "bbox": [ + 67, + 495, + 543, + 605 + ], + "type": "text", + "content": "Figure 4 | Comparison of TxGemma's performance with best-in-class specialist models. TxGemma-27B-Predict is evaluated on each task in TDC and compared to the corresponding best-in-class competitor. The panels depict different metrics used to evaluate the tasks. Tasks are colored by their feature types including one or a combination of SMILE, Amino acid, Nucleotide and text as indicated in the legend. Marker sizes illustrate the number of data points in the task on a log scale. 
The larger shaded area in blue indicates where TxGemma outperforms best-in-class models, while the narrower light blue shaded area indicates where TxGemma is performing near best-in-class model (defined as within " + }, + { + "bbox": [ + 67, + 495, + 543, + 605 + ], + "type": "inline_equation", + "content": "10\\%" + }, + { + "bbox": [ + 67, + 495, + 543, + 605 + ], + "type": "text", + "content": "). MAE and MSE values are log-transformed since the magnitudes of these values depend on the units of outputs. Generation accuracy is the fraction of correct SMILES strings in the USPTO generation task. Values for each task can also be found in Tables S.13 and S.14." + } + ] + } + ], + "index": 16, + "angle": 0, + "type": "image_caption" + } + ], + "index": 6 + }, + { + "type": "image", + "bbox": [ + 232, + 348, + 371, + 482 + ], + "blocks": [ + { + "bbox": [ + 232, + 348, + 371, + 482 + ], + "lines": [ + { + "bbox": [ + 232, + 348, + 371, + 482 + ], + "spans": [ + { + "bbox": [ + 232, + 348, + 371, + 482 + ], + "type": "image", + "image_path": "8b46427efa8d29aead6de741c144673fa93c545121e1a5c5fd8f01be42910034.jpg" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 400, + 385, + 458, + 393 + ], + "lines": [ + { + "bbox": [ + 400, + 385, + 458, + 393 + ], + "spans": [ + { + "bbox": [ + 400, + 385, + 458, + 393 + ], + "type": "text", + "content": "Amino acid" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_footnote" + }, + { + "bbox": [ + 400, + 406, + 505, + 414 + ], + "lines": [ + { + "bbox": [ + 400, + 406, + 505, + 414 + ], + "spans": [ + { + "bbox": [ + 400, + 406, + 505, + 414 + ], + "type": "text", + "content": "Nucleotide + Amino acid" + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "image_footnote" + }, + { + "bbox": [ + 400, + 416, + 483, + 424 + ], + "lines": [ + { + "bbox": [ + 400, + 416, + 483, + 424 + ], + "spans": [ + { + "bbox": [ + 400, + 416, + 483, + 424 + ], + "type": "text", + "content": 
"Amino acid + Text" + } + ] + } + ], + "index": 12, + "angle": 0, + "type": "image_footnote" + }, + { + "bbox": [ + 400, + 426, + 493, + 434 + ], + "lines": [ + { + "bbox": [ + 400, + 426, + 493, + 434 + ], + "spans": [ + { + "bbox": [ + 400, + 426, + 493, + 434 + ], + "type": "text", + "content": "Amino acid + SMILES" + } + ] + } + ], + "index": 13, + "angle": 0, + "type": "image_footnote" + }, + { + "bbox": [ + 400, + 437, + 456, + 445 + ], + "lines": [ + { + "bbox": [ + 400, + 437, + 456, + 445 + ], + "spans": [ + { + "bbox": [ + 400, + 437, + 456, + 445 + ], + "type": "text", + "content": "Nucleotide" + } + ] + } + ], + "index": 14, + "angle": 0, + "type": "image_footnote" + } + ], + "index": 7 + }, + { + "bbox": [ + 67, + 631, + 543, + 717 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 631, + 543, + 717 + ], + "spans": [ + { + "bbox": [ + 67, + 631, + 543, + 717 + ], + "type": "text", + "content": "conversation, but its performance deteriorates when prompts deviate from the expected format. Figure S.9 shows an example of such decline in TxGemma-27B-Predict's conversational capabilities. To expand the TxGemma family's capabilities and provide a more versatile tool with the ability to explain its reasoning, we trained TxGemma-Chat with a mix of therapeutic and general instruction-tuning data as detailed in Section 2.2. We evaluate these new conversational capabilities through a combination of standard LLM benchmarks and qualitative examples. We also run our models through assurance evaluations, as done for Gemma-3 [25], to verify that TxGemma models adhere to safety policies." 
+ } + ] + } + ], + "index": 17 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 533, + 742, + 541, + 751 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 533, + 742, + 541, + 751 + ], + "spans": [ + { + "bbox": [ + 533, + 742, + 541, + 751 + ], + "type": "text", + "content": "8" + } + ] + } + ], + "index": 18 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 7 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 73, + 142, + 541, + 451 + ], + "blocks": [ + { + "bbox": [ + 67, + 76, + 543, + 137 + ], + "lines": [ + { + "bbox": [ + 67, + 76, + 543, + 137 + ], + "spans": [ + { + "bbox": [ + 67, + 76, + 543, + 137 + ], + "type": "text", + "content": "Table 1 | Comparative performance of TxGemma and MolE on small molecule tasks. Details of the predictive performance of TxGemma-27B-Predict and MolE, a graph-based molecular multi-task foundation model, across various pharmacokinetics and toxicity tasks. Bold values indicate the best performance for each task. Metrics for MolE are reported from Mendez-Lucio et al. [24]. TxGemma-27B-Predict values are bootstrapped averages and " + }, + { + "bbox": [ + 67, + 76, + 543, + 137 + ], + "type": "inline_equation", + "content": "95\\%" + }, + { + "bbox": [ + 67, + 76, + 543, + 137 + ], + "type": "text", + "content": " CIs. These pharmacokinetics and toxicity tasks are publicly available in TDC [7]." + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 73, + 142, + 541, + 451 + ], + "lines": [ + { + "bbox": [ + 73, + 142, + 541, + 451 + ], + "spans": [ + { + "bbox": [ + 73, + 142, + 541, + 451 + ], + "type": "table", + "html": "
Task TypeTaskMetricMolE [24]TxGemma-27B-Predict
PharmacokineticsCaco2 WangMAE (↓)0.3290.401 (0.358-0.449)
Lipophilicity AstraZenecaMAE (↓)0.4060.538 (0.507-0.570)
Solubility AqSolDBMAE (↓)0.7760.907 (0.870-0.948)
PPBR AZMAE (↓)7.2299.048 (8.141-10.111)
HIA HouAUROC (↑)0.9840.988 (0.972-0.999)
Pgp BroccatelliAUROC (↑)0.9300.937 (0.904-0.964)
Bioavailability MaAUROC (↑)0.6400.694 (0.575-0.801)
BBB MartinsAUROC (↑)0.9030.908 (0.872-0.938)
CYP3A4 Substrate CarbonMangelsAUROC (↑)0.6920.691 (0.601-0.784)
CYP2D6 VeithAUPRC (↑)0.6790.683 (0.639-0.726)
CYP3A4 VeithAUPRC (↑)0.8760.854 (0.836-0.872)
CYP2C9 VeithAUPRC (↑)0.7820.798 (0.767-0.826)
CYP2D6 Substrate CarbonMangelsAUPRC (↑)0.6920.711 (0.570-0.830)
CYP2C9 Substrate CarbonMangelsAUPRC (↑)0.4090.438 (0.302-0.576)
VDss LombardoSpearman (↑)0.6440.559 (0.457-0.655)
Half Life ObachSpearman (↑)0.5780.458 (0.306-0.594)
Clearance Microsome AZSpearman (↑)0.6320.462 (0.353-0.565)
Clearance Hepatocyte AZSpearman (↑)0.4560.260 (0.129-0.384)
ToxicityLD50 ZhuMAE (↓)0.6020.627 (0.597-0.660)
hERGAUROC (↑)0.8350.885 (0.813-0.946)
AMESAUROC (↑)0.8340.816 (0.795-0.838)
DILIAUROC (↑)0.8520.886 (0.810-0.947)
", + "image_path": "97948ef7ade23a9c58cba5b5f186c03c35fd709d4e7765967ffad5cb11de2ff1.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "table_body" + } + ], + "index": 1 + }, + { + "type": "table", + "bbox": [ + 69, + 533, + 545, + 632 + ], + "blocks": [ + { + "bbox": [ + 67, + 468, + 544, + 528 + ], + "lines": [ + { + "bbox": [ + 67, + 468, + 544, + 528 + ], + "spans": [ + { + "bbox": [ + 67, + 468, + 544, + 528 + ], + "type": "text", + "content": "Table 2 | Comparative performance of TxGemma and LlaSMol on small molecule tasks. Comparison of TxGemma-27B-Predict with LlaSMolMistral (best LlaSMol model at 7B) across shared small-molecule tasks. Bold values indicate the best performance for each task. Metrics for LlaSMolMistral are reported from Yu et al. [23]. TxGemma-Predict values are bootstrapped averages and " + }, + { + "bbox": [ + 67, + 468, + 544, + 528 + ], + "type": "inline_equation", + "content": "95\\%" + }, + { + "bbox": [ + 67, + 468, + 544, + 528 + ], + "type": "text", + "content": " CIs. These pharmacokinetics, toxicity, and high-throughput screening data and tasks are publicly available in TDC [7]" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 69, + 533, + 545, + 632 + ], + "lines": [ + { + "bbox": [ + 69, + 533, + 545, + 632 + ], + "spans": [ + { + "bbox": [ + 69, + 533, + 545, + 632 + ], + "type": "table", + "html": "
Task TypeTaskMetricLlaSMolMistral [23]TxGemma-27B-PredictTxGemma-9B-Predict
PharmacokineticsBBBP†Accuracy (↑)0.7460.869 (0.835-0.901)0.847 (0.813-0.881)
ESOL†RMSE (↓)1.1501.250 (1.185-1.321)1.360 (1.246-1.480)
Lipo†RMSE (↓)1.0100.710 (0.668-0.752)0.742 (0.700-0.787)
ToxicityClintoxAccuracy (↑)0.9310.926 (0.896-0.956)0.925 (0.892-0.953)
High-throughput screeningHIV*Accuracy (↑)0.9670.968 (0.964-0.972)0.965 (0.961-0.969)
", + "image_path": "d2323cf4b228f6cb908f1814d922975c4028f6cd3bba08c079fa90cf5aa14728.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "table_body" + }, + { + "bbox": [ + 67, + 636, + 277, + 646 + ], + "lines": [ + { + "bbox": [ + 67, + 636, + 277, + 646 + ], + "spans": [ + { + "bbox": [ + 67, + 636, + 277, + 646 + ], + "type": "text", + "content": "* To predict whether compounds have anti-HIV properties." + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "table_footnote" + }, + { + "bbox": [ + 67, + 646, + 321, + 656 + ], + "lines": [ + { + "bbox": [ + 67, + 646, + 321, + 656 + ], + "spans": [ + { + "bbox": [ + 67, + 646, + 321, + 656 + ], + "type": "text", + "content": "† Task name is modified to match the nomenclature from Yu et al. [23]." + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "table_footnote" + } + ], + "index": 3 + }, + { + "bbox": [ + 67, + 675, + 544, + 724 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 675, + 544, + 724 + ], + "spans": [ + { + "bbox": [ + 67, + 675, + 544, + 724 + ], + "type": "text", + "content": "TxGemma-Chat bridges the gap between property predictors and general language models To assess the performance of TxGemma-Chat as a general conversational LLM, we evaluated it on the Massive Multitask Language Understanding (MMLU) [26] benchmark, a comprehensive suite of 57 diverse tasks spanning mathematics, history, computer science, law, etc. 
This benchmark evaluates knowledge, reasoning," + } + ] + } + ], + "index": 7 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 533, + 742, + 541, + 751 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 533, + 742, + 541, + 751 + ], + "spans": [ + { + "bbox": [ + 533, + 742, + 541, + 751 + ], + "type": "text", + "content": "9" + } + ] + } + ], + "index": 8 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 8 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 73, + 71, + 298, + 198 + ], + "blocks": [ + { + "bbox": [ + 73, + 71, + 298, + 198 + ], + "lines": [ + { + "bbox": [ + 73, + 71, + 298, + 198 + ], + "spans": [ + { + "bbox": [ + 73, + 71, + 298, + 198 + ], + "type": "image", + "image_path": "cff609f1cff46876c76c043fcbdff25eab187a12d98a2bd260d1ab40fc7d3e3e.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 67, + 212, + 544, + 296 + ], + "lines": [ + { + "bbox": [ + 67, + 212, + 544, + 296 + ], + "spans": [ + { + "bbox": [ + 67, + 212, + 544, + 296 + ], + "type": "text", + "content": "Figure 5 | TxGemma-Chat bridges the gap between property predictors and general LLMs. Each point represents a therapeutic task in the TDC. The figure depicts relative predictive performance changes of TxGemma-Chat in comparison to TxGemma-Predict (top) and Gemma-2 (bottom) for 9B variants left and 27B variants in right. As expected, TxGemma-27B-Predict outperforms TxGemma-27B-Chat on therapeutic tasks, with TxGemma-27B-Chat showing a " + }, + { + "bbox": [ + 67, + 212, + 544, + 296 + ], + "type": "inline_equation", + "content": "10.69\\%" + }, + { + "bbox": [ + 67, + 212, + 544, + 296 + ], + "type": "text", + "content": " median relative performance reduction. 
However, TxGemma-27B-Chat exceeds the Gemma-2-27B baseline by " + }, + { + "bbox": [ + 67, + 212, + 544, + 296 + ], + "type": "inline_equation", + "content": "29.67\\%" + }, + { + "bbox": [ + 67, + 212, + 544, + 296 + ], + "type": "text", + "content": " on TDC therapeutic tasks. Similarly, TxGemma-9B-Chat's performance is " + }, + { + "bbox": [ + 67, + 212, + 544, + 296 + ], + "type": "inline_equation", + "content": "10.32\\%" + }, + { + "bbox": [ + 67, + 212, + 544, + 296 + ], + "type": "text", + "content": " lower than TxGemma-9B-Predict's. Values for each task can be found in Tables S.15 and S.16." + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "type": "image", + "bbox": [ + 312, + 72, + 541, + 198 + ], + "blocks": [ + { + "bbox": [ + 312, + 72, + 541, + 198 + ], + "lines": [ + { + "bbox": [ + 312, + 72, + 541, + 198 + ], + "spans": [ + { + "bbox": [ + 312, + 72, + 541, + 198 + ], + "type": "image", + "image_path": "29542d68bc684f7ee0c7db74899e9f4990c401e026299fb8377fbbe2aa9ea653.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + } + ], + "index": 1 + }, + { + "bbox": [ + 67, + 323, + 541, + 372 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 323, + 541, + 372 + ], + "spans": [ + { + "bbox": [ + 67, + 323, + 541, + 372 + ], + "type": "text", + "content": "and problem-solving abilities across a wide range of academic subjects, providing a measure of overall language understanding. It comprises 14,079 multiple-choice questions, each with four possible answers. For this multiple-choice format, we took the model's prediction as the option with the highest log-likelihood in a zero-shot setting and report overall accuracy as well as per-subject accuracy." 
+ } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 67, + 375, + 544, + 461 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 375, + 544, + 461 + ], + "spans": [ + { + "bbox": [ + 67, + 375, + 544, + 461 + ], + "type": "text", + "content": "Figure S.7 compares the performance of TxGemma-27B-Chat, TxGemma-27B-Predict, and Gemma-2-27B on MMLU, a standard benchmark for evaluating general LLMs. TxGemma-27B-Chat achieves an accuracy of " + }, + { + "bbox": [ + 67, + 375, + 544, + 461 + ], + "type": "inline_equation", + "content": "73.87\\%" + }, + { + "bbox": [ + 67, + 375, + 544, + 461 + ], + "type": "text", + "content": ", slightly lower than Gemma-2-27B's " + }, + { + "bbox": [ + 67, + 375, + 544, + 461 + ], + "type": "inline_equation", + "content": "75.38\\%" + }, + { + "bbox": [ + 67, + 375, + 544, + 461 + ], + "type": "text", + "content": ", but TxGemma-27B-Chat shows slight improvements in areas such as medical genetics, high school statistics, and college chemistry. Furthermore, TxGemma-27B-Chat significantly outperforms TxGemma-27B-Predict, which has an accuracy of " + }, + { + "bbox": [ + 67, + 375, + 544, + 461 + ], + "type": "inline_equation", + "content": "53.60\\%" + }, + { + "bbox": [ + 67, + 375, + 544, + 461 + ], + "type": "text", + "content": ". This suggests that while fine-tuning solely on therapeutic data can diminish general knowledge acquired during pre-training, incorporating general instruction-tuning data can mitigate this effect." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 67, + 464, + 544, + 574 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 464, + 544, + 574 + ], + "spans": [ + { + "bbox": [ + 67, + 464, + 544, + 574 + ], + "type": "text", + "content": "Furthermore, we assess TxGemma-27B-Chat on all therapeutic tasks within TDC. 
Figure 5 compares the relative performance changes of TxGemma-27B-Chat to TxGemma-27B-Predict and Gemma-2-27B for both 9B and 27B variants across these tasks. As anticipated, TxGemma-27B-Predict outperforms TxGemma-27B-Chat on these predictive tasks, with a median relative performance reduction of " + }, + { + "bbox": [ + 67, + 464, + 544, + 574 + ], + "type": "inline_equation", + "content": "11\\%" + }, + { + "bbox": [ + 67, + 464, + 544, + 574 + ], + "type": "text", + "content": " observed for TxGemma-27B-Chat. Nevertheless, TxGemma-27B-Chat surpasses the baseline Gemma-2-27B, demonstrating a median relative improvement of " + }, + { + "bbox": [ + 67, + 464, + 544, + 574 + ], + "type": "inline_equation", + "content": "30\\%" + }, + { + "bbox": [ + 67, + 464, + 544, + 574 + ], + "type": "text", + "content": ". Similarly, TxGemma-9B-Chat shows a " + }, + { + "bbox": [ + 67, + 464, + 544, + 574 + ], + "type": "inline_equation", + "content": "10\\%" + }, + { + "bbox": [ + 67, + 464, + 544, + 574 + ], + "type": "text", + "content": " median relative performance reduction compared to TxGemma-9B-Predict. Regression tasks experience the greatest performance decline from the general-purpose training. These results demonstrate how TxGemma-Chat bridges the gap between therapeutic property predictors and general LLMs, functioning as a unified model for both capabilities." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 67, + 577, + 544, + 687 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 577, + 544, + 687 + ], + "spans": [ + { + "bbox": [ + 67, + 577, + 544, + 687 + ], + "type": "text", + "content": "TxGemma-Chat can provide reasoning for complex tasks. A particularly compelling application of conversational models lies in prompting them to explain their predictions to users. While general LLMs may possess some foundational knowledge concerning therapeutic challenges, they are not accurate for property prediction (Figure 5). 
In Figure 6, we prompt TxGemma-27B-Chat to answer a question regarding blood-brain barrier permeability using the BBB Martins prompt format. TxGemma-27B-Chat provides only the answer in the initial turn, but when given a subsequent prompt to articulate its rationale, the model provides mechanistic reasoning for its answer based on molecular solubility and the structure of the input molecule derived from the SMILES string. All of this reasoning occurred directly within the model weights, without requiring any preprocessing of the SMILES string." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 67, + 689, + 544, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 689, + 544, + 715 + ], + "spans": [ + { + "bbox": [ + 67, + 689, + 544, + 715 + ], + "type": "text", + "content": "Interestingly, prompting structures enable TxGemma-Chat to provide additional reasoning on complex tasks. For instance, while the relationship between blood-brain barrier permeability and lipophilicity is intuitive, some" + } + ] + } + ], + "index": 7 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 528, + 742, + 542, + 752 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 528, + 742, + 542, + 752 + ], + "spans": [ + { + "bbox": [ + 528, + 742, + 542, + 752 + ], + "type": "text", + "content": "10" + } + ] + } + ], + "index": 8 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 9 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 88, + 105, + 525, + 315 + ], + "blocks": [ + { + "bbox": [ + 67, + 76, + 543, + 99 + ], + "lines": [ + { + "bbox": [ + 67, + 76, + 543, + 99 + ], + "spans": [ + { + "bbox": [ + 67, + 76, + 543, + 99 + ], + "type": "text", + "content": "Table 3 | Performance of Agentic-Tx. Accuracy of Agentic-Tx compared with SOTA models on ChemBench, GPQA, and HLE benchmarks." 
+ } + ] + } + ], + "index": 0, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 88, + 105, + 525, + 315 + ], + "lines": [ + { + "bbox": [ + 88, + 105, + 525, + 315 + ], + "spans": [ + { + "bbox": [ + 88, + 105, + 525, + 315 + ], + "type": "table", + "html": "
ModelChemBenchGPQA (Diamond)Humanity's Last Exam
MiniPreferenceChemistryChemistry & Biology
Agentic-Tx (Gemini 2.5-Pro)84.566.281.720.1
Agentic-Tx (Gemini 2.0-Pro)83.465.562.414.5
Agentic-Tx (Gemini 1.5-Pro)80.665.051.811.9
Claude-3.5 (Sonnet)73.0*60.0*†40.4-
GPT-4o72.0*59.0*43.8**3.8
Gemini 2.5-pro82.865.579.517.9
Gemini 2.0-pro79.658.453.311.1
Gemini 1.5-pro74.955.648.210.6
PaperQA2 [28]67.0*56.0*--
o180.0*56.0*64.7**12.3
o3-mini (medium)82.461.362.513.0
o3-mini (high)82.562.064.513.2
Human Expert (Average Performance)27.0---
", + "image_path": "327ad9521c9b1211442d7f2d946b5d27f8bc523409af6030fb32861ad17716a2.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "table_body" + }, + { + "bbox": [ + 92, + 319, + 382, + 331 + ], + "lines": [ + { + "bbox": [ + 92, + 319, + 382, + 331 + ], + "spans": [ + { + "bbox": [ + 92, + 319, + 382, + 331 + ], + "type": "inline_equation", + "content": "(\\dagger)" + }, + { + "bbox": [ + 92, + 319, + 382, + 331 + ], + "type": "text", + "content": " Using ReAct framework, " + }, + { + "bbox": [ + 92, + 319, + 382, + 331 + ], + "type": "inline_equation", + "content": "(^{*})" + }, + { + "bbox": [ + 92, + 319, + 382, + 331 + ], + "type": "text", + "content": " Extracted from [1], " + }, + { + "bbox": [ + 92, + 319, + 382, + 331 + ], + "type": "inline_equation", + "content": "(^{**})" + }, + { + "bbox": [ + 92, + 319, + 382, + 331 + ], + "type": "text", + "content": " Extracted from [2]" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "table_footnote" + } + ], + "index": 1 + }, + { + "bbox": [ + 67, + 350, + 543, + 411 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 350, + 543, + 411 + ], + "spans": [ + { + "bbox": [ + 67, + 350, + 543, + 411 + ], + "type": "text", + "content": "tasks such as predicting clinical trial approval are more challenging to reason over. If TxGemma-27B-Chat is prompted to provide reasoning in the same manner as in Figure 6 for predicting clinical trial approval, TxGemma-27B-Chat refuses and directs the user to alternative sources. However, when modifying the original prompt, instructing the model to output reasoning steps before the final answer, it bypasses the refusal and restores reasoning capabilities (Figure S.10)." 
+ } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 67, + 420, + 382, + 433 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 420, + 382, + 433 + ], + "spans": [ + { + "bbox": [ + 67, + 420, + 382, + 433 + ], + "type": "text", + "content": "3.3 Agentic Planning and Execution based on TxGemma" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 66, + 441, + 543, + 562 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 66, + 441, + 543, + 562 + ], + "spans": [ + { + "bbox": [ + 66, + 441, + 543, + 562 + ], + "type": "text", + "content": "Agentic-Tx demonstrates competitive performance on therapeutic benchmarks. We evaluate the capability of Agentic-Tx to assist with therapeutics tasks by means of questions from three benchmarks: GPQA (Diamond) [27], ChemBench [1], and Humanity's Last Exam (HLE) [15]. Within each benchmark, we use existing selections of therapeutic-relevant questions; for GPQA we evaluate GPQA-Chemistry (47 questions), for ChemBench we evaluate ChemBench-Chemical Preference which aims to select an ideal candidate molecule for therapeutic development (1,001 question) and ChemBench-mini, which evaluates across 8 categories of chemistry from toxicity/safety to organic chemistry (236 questions). Finally, for HLE, we evaluate HLE-Chemistry and HLE-Biology (235 questions). For open-ended questions in HLE, we observed a high variation of metric scores depending on the selection of the LLM-rater model [15]. To ensure an objective accuracy measure, we restrict the evaluation to multiple choice questions (MCQs)." 
+ } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 67, + 565, + 543, + 723 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 565, + 543, + 723 + ], + "spans": [ + { + "bbox": [ + 67, + 565, + 543, + 723 + ], + "type": "text", + "content": "As shown in Table 3, Agentic-Tx (Gemini 2.5-Pro), Agentic-Tx (Gemini 2.0-Pro), and Agentic-Tx (Gemini 1.5-Pro) achieve competitive or greater accuracy compared to existing SOTA models across several benchmarks. Specifically, Agentic-Tx (Gemini 2.5-Pro) and Agentic-Tx (Gemini 2.0-Pro) surpasses prior SOTA models on the exceptionally difficult Humanity's Last Exam benchmark (Chemistry & Biology tasks), with Agentic-Tx (Gemini 2.5-Pro) achieving " + }, + { + "bbox": [ + 67, + 565, + 543, + 723 + ], + "type": "inline_equation", + "content": "52.3\\%" + }, + { + "bbox": [ + 67, + 565, + 543, + 723 + ], + "type": "text", + "content": " relative improvement over o3-mini (high) and " + }, + { + "bbox": [ + 67, + 565, + 543, + 723 + ], + "type": "inline_equation", + "content": "13.4\\%" + }, + { + "bbox": [ + 67, + 565, + 543, + 723 + ], + "type": "text", + "content": " over Gemini 2.5-pro, as well as on ChemBench, with relative improvements of " + }, + { + "bbox": [ + 67, + 565, + 543, + 723 + ], + "type": "inline_equation", + "content": "6.3\\%" + }, + { + "bbox": [ + 67, + 565, + 543, + 723 + ], + "type": "text", + "content": " (ChemBench-Preference) and " + }, + { + "bbox": [ + 67, + 565, + 543, + 723 + ], + "type": "inline_equation", + "content": "2.4\\%" + }, + { + "bbox": [ + 67, + 565, + 543, + 723 + ], + "type": "text", + "content": " (ChemBench-Mini) over o3-mini (high) and " + }, + { + "bbox": [ + 67, + 565, + 543, + 723 + ], + "type": "inline_equation", + "content": "1.1\\%" + }, + { + "bbox": [ + 67, + 565, + 543, + 723 + ], + "type": "text", + "content": " (ChemBench-Preference) and " + }, + { + "bbox": [ + 67, + 565, + 543, + 723 + ], + "type": "inline_equation", + "content": "2.0\\%" + }, + { + 
"bbox": [ + 67, + 565, + 543, + 723 + ], + "type": "text", + "content": " (ChemBench-Mini) over Gemini 2.5-pro. On GPQA (Diamond), Agentic-Tx also achieves SOTA accuracy with " + }, + { + "bbox": [ + 67, + 565, + 543, + 723 + ], + "type": "inline_equation", + "content": "26.7\\%" + }, + { + "bbox": [ + 67, + 565, + 543, + 723 + ], + "type": "text", + "content": " relative improvements over o3-mini and " + }, + { + "bbox": [ + 67, + 565, + 543, + 723 + ], + "type": "inline_equation", + "content": "2.7\\%" + }, + { + "bbox": [ + 67, + 565, + 543, + 723 + ], + "type": "text", + "content": " over Gemini 2.5-pro. All variants of Agentic-Tx outperform their corresponding base Gemini models across all benchmarks, indicating the effectiveness of the Agentic-Tx framework in enhancing LLMs efficacy for advanced reasoning within this domain. This suggests that agentic workflows such as ours represent useful tools for therapeutic development, particularly in areas requiring domain knowledge and the selection of candidate molecules. The agent's ability to leverage external tools and perform multi-step" + } + ] + } + ], + "index": 6 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 528, + 742, + 541, + 752 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 528, + 742, + 541, + 752 + ], + "spans": [ + { + "bbox": [ + 528, + 742, + 541, + 752 + ], + "type": "text", + "content": "11" + } + ] + } + ], + "index": 7 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 10 + }, + { + "para_blocks": [ + { + "bbox": [ + 85, + 76, + 306, + 86 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 85, + 76, + 306, + 86 + ], + "spans": [ + { + "bbox": [ + 85, + 76, + 306, + 86 + ], + "type": "text", + "content": "Instructions: Answer the following question about drug properties." 
+ } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 85, + 87, + 447, + 114 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 85, + 87, + 447, + 114 + ], + "spans": [ + { + "bbox": [ + 85, + 87, + 447, + 114 + ], + "type": "text", + "content": "Context: As a membrane separating circulating blood and brain extracellular fluid, the blood-brain barrier (BBB) is the protection layer that blocks most foreign drugs. Thus the ability of a drug to penetrate the barrier to deliver to the site of action forms a crucial challenge in development of drugs for central nervous system." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 85, + 116, + 271, + 125 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 85, + 116, + 271, + 125 + ], + "spans": [ + { + "bbox": [ + 85, + 116, + 271, + 125 + ], + "type": "text", + "content": "Question: Given a drug SMILES string, predict whether it" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 85, + 125, + 240, + 133 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 85, + 125, + 240, + 133 + ], + "spans": [ + { + "bbox": [ + 85, + 125, + 240, + 133 + ], + "type": "text", + "content": "(A) does not cross the BBB (B) crosses the BBB" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 85, + 137, + 374, + 146 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 85, + 137, + 374, + 146 + ], + "spans": [ + { + "bbox": [ + 85, + 137, + 374, + 146 + ], + "type": "text", + "content": "Drug SMILES: C1CN(CCC1(C=O)NC(CCO)C2=CC=C(C=C2)Cl)N)C3=NC=NC4=C3C=CN44" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 161, + 165, + 200, + 175 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 161, + 165, + 200, + 175 + ], + "spans": [ + { + "bbox": [ + 161, + 165, + 200, + 175 + ], + "type": "text", + "content": "TxGemma" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 162, + 178, + 202, + 188 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 
162, + 178, + 202, + 188 + ], + "spans": [ + { + "bbox": [ + 162, + 178, + 202, + 188 + ], + "type": "text", + "content": "Answer: (B)" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 85, + 203, + 269, + 213 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 85, + 203, + 269, + 213 + ], + "spans": [ + { + "bbox": [ + 85, + 203, + 269, + 213 + ], + "type": "text", + "content": "Explain your reasoning based on the molecule structure." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 161, + 224, + 200, + 235 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 161, + 224, + 200, + 235 + ], + "spans": [ + { + "bbox": [ + 161, + 224, + 200, + 235 + ], + "type": "text", + "content": "TxGemma" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 162, + 241, + 463, + 251 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 162, + 241, + 463, + 251 + ], + "spans": [ + { + "bbox": [ + 162, + 241, + 463, + 251 + ], + "type": "text", + "content": "The molecule has several properties that suggest it can cross the blood-brain barrier (BBB):" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 162, + 258, + 525, + 277 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 162, + 258, + 525, + 277 + ], + "spans": [ + { + "bbox": [ + 162, + 258, + 525, + 277 + ], + "type": "text", + "content": "Lipophilicity: The molecule contains multiple aromatic rings and a long alkyl chain, contributing to its lipophilicity. Lipophilicity is a key factor in BBB penetration, as the barrier is primarily composed of lipids." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 162, + 279, + 524, + 297 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 162, + 279, + 524, + 297 + ], + "spans": [ + { + "bbox": [ + 162, + 279, + 524, + 297 + ], + "type": "text", + "content": "Molecular weight: The molecular weight is within a range (around 400 Da) that is generally considered favorable for BBB permeability." 
+ } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 162, + 300, + 524, + 318 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 162, + 300, + 524, + 318 + ], + "spans": [ + { + "bbox": [ + 162, + 300, + 524, + 318 + ], + "type": "text", + "content": "Hydrogen bond donors and acceptors: While there are hydrogen bond donors and acceptors present, they are not excessive, which can sometimes hinder BBB passage." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 162, + 319, + 524, + 338 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 162, + 319, + 524, + 338 + ], + "spans": [ + { + "bbox": [ + 162, + 319, + 524, + 338 + ], + "type": "text", + "content": "Lack of charged groups: The molecule lacks any charged groups at physiological pH, making it less likely to be repelled by the BBB's tight junctions." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 162, + 340, + 524, + 358 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 162, + 340, + 524, + 358 + ], + "spans": [ + { + "bbox": [ + 162, + 340, + 524, + 358 + ], + "type": "text", + "content": "Overall, the combination of lipophilicity, moderate molecular weight, balanced hydrogen bonding capacity, and lack of charged groups suggests that this molecule has a good chance of crossing the blood-brain barrier." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 84, + 376, + 436, + 385 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 84, + 376, + 436, + 385 + ], + "spans": [ + { + "bbox": [ + 84, + 376, + 436, + 385 + ], + "type": "text", + "content": "What makes passing through the blood-brain barrier different from passive diffusion through a membrane?" 
+ } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 161, + 398, + 200, + 407 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 161, + 398, + 200, + 407 + ], + "spans": [ + { + "bbox": [ + 161, + 398, + 200, + 407 + ], + "type": "text", + "content": "TxGemma" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 162, + 418, + 524, + 436 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 162, + 418, + 524, + 436 + ], + "spans": [ + { + "bbox": [ + 162, + 418, + 524, + 436 + ], + "type": "text", + "content": "While passive diffusion is a major mechanism for molecules to cross the blood-brain barrier (BBB), there are key differences that make BBB passage more challenging and selective:" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 166, + 437, + 524, + 558 + ], + "type": "list", + "angle": 0, + "index": 23, + "blocks": [ + { + "bbox": [ + 166, + 437, + 524, + 464 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 166, + 437, + 524, + 464 + ], + "spans": [ + { + "bbox": [ + 166, + 437, + 524, + 464 + ], + "type": "text", + "content": "1. Tight Junctions: The cells forming the BBB are packed very tightly together with specialized structures called tight junctions. These junctions seal the spaces between cells, significantly reducing the gaps through which molecules can passively diffuse." + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 166, + 464, + 523, + 490 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 166, + 464, + 523, + 490 + ], + "spans": [ + { + "bbox": [ + 166, + 464, + 523, + 490 + ], + "type": "text", + "content": "2. Specialized Transport Systems: The BBB has a variety of specialized transport systems that actively move specific molecules across the barrier. These systems are highly selective and often require molecules to have specific structural features or be recognized by specific receptors." 
+ } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 166, + 490, + 523, + 515 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 166, + 490, + 523, + 515 + ], + "spans": [ + { + "bbox": [ + 166, + 490, + 523, + 515 + ], + "type": "text", + "content": "3. Efflux Pumps: The BBB also expresses efflux pumps, which are proteins that actively pump molecules back out of the brain. This mechanism helps to prevent potentially harmful substances from entering the brain and maintain a stable brain environment." + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 166, + 516, + 523, + 533 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 166, + 516, + 523, + 533 + ], + "spans": [ + { + "bbox": [ + 166, + 516, + 523, + 533 + ], + "type": "text", + "content": "4. Metabolic Enzymes: The BBB contains enzymes that can metabolize and inactivate certain molecules, further limiting their entry into the brain." + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 166, + 533, + 523, + 558 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 166, + 533, + 523, + 558 + ], + "spans": [ + { + "bbox": [ + 166, + 533, + 523, + 558 + ], + "type": "text", + "content": "5. Astrocyte Endfeet: Astrocytes, a type of glial cell, have projections called endfeet that surround blood vessels in the brain. These endfeet contribute to the BBB's barrier function and regulate the passage of molecules." 
+ } + ] + } + ], + "index": 22 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 162, + 561, + 524, + 605 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 162, + 561, + 524, + 605 + ], + "spans": [ + { + "bbox": [ + 162, + 561, + 524, + 605 + ], + "type": "text", + "content": "In summary, while passive diffusion plays a role in BBB permeability, the presence of tight junctions, specialized transport systems, efflux pumps, metabolic enzymes, and astrocyte endfeet make the BBB a highly selective barrier that restricts the entry of many molecules. This selectivity is crucial for protecting the brain from harmful substances and maintaining a stable internal environment, but it also poses challenges for drug delivery to the central nervous system." + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 88, + 623, + 239, + 632 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 88, + 623, + 239, + 632 + ], + "spans": [ + { + "bbox": [ + 88, + 623, + 239, + 632 + ], + "type": "text", + "content": "Is my molecule likely to run into these issues?" + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 161, + 644, + 200, + 654 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 161, + 644, + 200, + 654 + ], + "spans": [ + { + "bbox": [ + 161, + 644, + 200, + 654 + ], + "type": "text", + "content": "TxGemma" + } + ] + } + ], + "index": 26 + }, + { + "bbox": [ + 162, + 658, + 459, + 667 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 162, + 658, + 459, + 667 + ], + "spans": [ + { + "bbox": [ + 162, + 658, + 459, + 667 + ], + "type": "text", + "content": "Let's analyze how your molecule might be affected by the specific challenges of the BBB ..." 
+ } + ] + } + ], + "index": 27 + }, + { + "bbox": [ + 67, + 683, + 542, + 731 + ], + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 683, + 542, + 731 + ], + "spans": [ + { + "bbox": [ + 67, + 683, + 542, + 731 + ], + "type": "text", + "content": "Figure 6 | TxGemma-Chat can be prompted to explain its reasoning or provide additional context. Following an initial question, TxGemma-27B-Chat is asked to explain its reasoning based on molecule structure in the second turn. The model uses its understanding of chemistry and biology to justify its answer and can continually engage with the user on follow-up questions." + } + ] + } + ], + "index": 28, + "type": "text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 528, + 742, + 541, + 751 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 528, + 742, + 541, + 751 + ], + "spans": [ + { + "bbox": [ + 528, + 742, + 541, + 751 + ], + "type": "text", + "content": "12" + } + ] + } + ], + "index": 29 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 11 + }, + { + "para_blocks": [ + { + "bbox": [ + 67, + 72, + 472, + 83 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 72, + 472, + 83 + ], + "spans": [ + { + "bbox": [ + 67, + 72, + 472, + 83 + ], + "type": "text", + "content": "reasoning enables it to address more complex queries beyond the scope of traditional LLMs." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 67, + 87, + 543, + 269 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 87, + 543, + 269 + ], + "spans": [ + { + "bbox": [ + 67, + 87, + 543, + 269 + ], + "type": "text", + "content": "Agentic-Tx effectively leverages various tools based on the therapeutic task requirement. In Figure S.14, we investigate tool usage frequency within the Agentic-Tx system across the ChemBench-Preference and Biology and Chemistry (B&C) HLE datasets. 
Our analysis reveals that Agentic-Tx tool usage distribution varies significantly depending on the task and dataset. For the ChemBench-Preference task, which focuses on selecting ideal candidate molecules for therapeutic development, the Agentic-Tx system exhibits a high frequency of usage for tools such as SMILES description and toxicity prediction. This suggests a strong emphasis on molecular characterization and safety assessment in this task correctly invoked by Agentic-Tx. In contrast, on the B&C HLE dataset, tool usage is predominantly concentrated on general knowledge retrieval tools like PubMed or Wikipedia search. This indicates that the Agentic-Tx system relies heavily on accessing and synthesizing broad biological or chemical knowledge to address questions in these domains. In Figure S.15, we investigate the breakdown of tool interactions per question and explore how these interactions contribute to performance variations. Our analysis shows that each question can involve up to 8 tool calls, and the high usage of tools such as SMILES description and toxicity prediction tools correlates with overall performance improvement. These results highlight the Agentic-Tx system's adaptive nature, demonstrating its ability to leverage different tools based on the specific requirements of the task." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 67, + 277, + 542, + 361 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 277, + 542, + 361 + ], + "spans": [ + { + "bbox": [ + 67, + 277, + 542, + 361 + ], + "type": "text", + "content": "Agentic-Tx inference time is suitable for real time human interaction Analysis of Agentic-Tx's inference time indicates efficient performance characteristics. The median time observed for tool execution is 0.55 seconds. The fastest tool (Gene Sequence) completes execution in 0.15 seconds, while the slowest (ToxCast) requires 28.2 seconds. 
This suggests that Agentic-Tx operates within a timeframe conducive to real-time user interaction. The observed latencies demonstrate suitability for integration into workflows where immediate feedback and responsiveness are desired. The system's ability to maintain a median inference time below one second contributes to an efficient user experience." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 67, + 372, + 279, + 384 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 372, + 279, + 384 + ], + "spans": [ + { + "bbox": [ + 67, + 372, + 279, + 384 + ], + "type": "text", + "content": "3.4 Additional Analysis and Ablations" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 67, + 393, + 542, + 477 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 393, + 542, + 477 + ], + "spans": [ + { + "bbox": [ + 67, + 393, + 542, + 477 + ], + "type": "text", + "content": "Data contamination analysis and data leakage considerations To assess potential data contamination from the Gemma-2 pretraining data, we calculated the overlap between features in the therapeutic instruction-tuning data and the pretraining corpus. For multi-instance tasks, contamination was defined as the presence of any constituent feature (e.g., drug SMILES or target protein sequence in drug-target binding) in the pretraining data. The majority of tasks showed no direct contamination (Figure S.12). For tasks with some contamination, filtering contaminated datapoints and recalculating TxGemma-27B-Predict performance revealed no significant changes (Figure S.13)." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 67, + 481, + 542, + 602 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 481, + 542, + 602 + ], + "spans": [ + { + "bbox": [ + 67, + 481, + 542, + 602 + ], + "type": "text", + "content": "While direct contamination was minimal, we further investigated potential indirect contamination. 
Although SMILES strings are less common in general web text, pretraining on molecular names could have created learned associations between names and SMILES, potentially influencing test set performance. To test this, we compared the similarity of TxGemma-27B-Predict embeddings for PubChem molecules represented as SMILES strings and their corresponding IUPAC names, against the similarity of embeddings for SMILES strings paired with decoy (randomly selected, incorrect) names. The similarities were statistically equivalent (Figure S.12), confirmed by a two one-sided t-test " + }, + { + "bbox": [ + 67, + 481, + 542, + 602 + ], + "type": "inline_equation", + "content": "(p = 3 \\times 10^{-12}" + }, + { + "bbox": [ + 67, + 481, + 542, + 602 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 67, + 481, + 542, + 602 + ], + "type": "inline_equation", + "content": "\\delta = 0.02)" + }, + { + "bbox": [ + 67, + 481, + 542, + 602 + ], + "type": "text", + "content": ". This suggests that TxGemma-27B-Predict did not learn spurious name-SMILES associations during pretraining, likely because names and SMILES were encountered in separate training phases and for different molecules. Therefore, both direct and indirect contamination from pretraining are unlikely to significantly affect our results." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 67, + 606, + 542, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 606, + 542, + 715 + ], + "spans": [ + { + "bbox": [ + 67, + 606, + 542, + 715 + ], + "type": "text", + "content": "Fine-tuning TxGemma models improves data efficiency. Given the scarcity of therapeutic data and the potential of TxGemma to serve as a pretrained model for further adaptation, we investigated TxGemma's data efficiency and generalization to new tasks in out-of-distribution settings. 
Specifically, we fine-tuned the baseline model Gemma-2-27B as well as our TxGemma-27B-Predict on adverse event prediction data from TrialBench [29]. Serious adverse events are critical in assessing the safety profile of a new treatment and accurate prediction of these events allows for better risk management and resource allocation [29]. To ensure a fair evaluation of generalization, we filtered the TrialBench test set to exclude samples overlapping with phase 1, 2, or 3 of clinical trial outcome prediction data in TDC. In addition, datapoints without available SMILES strings are excluded. This lead to 14,368 train and 3,184 test samples." + } + ] + } + ], + "index": 6 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 528, + 742, + 541, + 752 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 528, + 742, + 541, + 752 + ], + "spans": [ + { + "bbox": [ + 528, + 742, + 541, + 752 + ], + "type": "text", + "content": "13" + } + ] + } + ], + "index": 7 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 12 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 72, + 71, + 192, + 204 + ], + "blocks": [ + { + "bbox": [ + 72, + 71, + 192, + 204 + ], + "lines": [ + { + "bbox": [ + 72, + 71, + 192, + 204 + ], + "spans": [ + { + "bbox": [ + 72, + 71, + 192, + 204 + ], + "type": "image", + "image_path": "4dfa44b4877c0dff07f7e32085f8af1e800f4ac8bc4d1d5740a2f5789b0e102d.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 97, + 213, + 532, + 224 + ], + "lines": [ + { + "bbox": [ + 97, + 213, + 532, + 224 + ], + "spans": [ + { + "bbox": [ + 97, + 213, + 532, + 224 + ], + "type": "text", + "content": "Gemma-27B (S) " + }, + { + "bbox": [ + 97, + 213, + 532, + 224 + ], + "type": "inline_equation", + "content": "\\rightarrow" + }, + { + "bbox": [ + 97, + 213, + 532, + 224 + ], + "type": "text", + "content": " TxGemma-27B-Predict (S) --- Gemma-27B (S+T) —— TxGemma-27B-Predict (S+T) --- Best-in-class 
(S+T)" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_footnote" + }, + { + "bbox": [ + 67, + 241, + 544, + 326 + ], + "lines": [ + { + "bbox": [ + 67, + 241, + 544, + 326 + ], + "spans": [ + { + "bbox": [ + 67, + 241, + 544, + 326 + ], + "type": "text", + "content": "Figure 7 | TxGemma improves efficiency at adverse event prediction from SMILES strings. The figure shows the AUROC of predicting adverse events in a clinical trial from the drug SMILES strings as a function of the training data fraction for Gemma-2-27B and TxGemma-27B-Predict. Clinical trials are separated based on trial phase, and datapoints without available SMILES strings are excluded. To assess model performance with additional textual information, separate models trained on both SMILES strings and additional textual information are indicated by colored dashed lines, and SOTA models are indicated by gray dashed lines. (S) denotes models trained with SMILES strings only, and " + }, + { + "bbox": [ + 67, + 241, + 544, + 326 + ], + "type": "inline_equation", + "content": "(\\mathrm{S} + \\mathrm{T})" + }, + { + "bbox": [ + 67, + 241, + 544, + 326 + ], + "type": "text", + "content": " those trained with SMILES and textual information (Table S.10)." 
+ } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "type": "image", + "bbox": [ + 195, + 72, + 306, + 204 + ], + "blocks": [ + { + "bbox": [ + 195, + 72, + 306, + 204 + ], + "lines": [ + { + "bbox": [ + 195, + 72, + 306, + 204 + ], + "spans": [ + { + "bbox": [ + 195, + 72, + 306, + 204 + ], + "type": "image", + "image_path": "868848d097319f43df2f2324861cfe2384f376dcae1a95d6563e3f7b212d5ba8.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + } + ], + "index": 1 + }, + { + "type": "image", + "bbox": [ + 310, + 72, + 422, + 203 + ], + "blocks": [ + { + "bbox": [ + 310, + 72, + 422, + 203 + ], + "lines": [ + { + "bbox": [ + 310, + 72, + 422, + 203 + ], + "spans": [ + { + "bbox": [ + 310, + 72, + 422, + 203 + ], + "type": "image", + "image_path": "6eb51e87119992d386790213cb24f8ca1436c165f652aa4c124e908de08b04eb.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 426, + 72, + 539, + 204 + ], + "blocks": [ + { + "bbox": [ + 426, + 72, + 539, + 204 + ], + "lines": [ + { + "bbox": [ + 426, + 72, + 539, + 204 + ], + "spans": [ + { + "bbox": [ + 426, + 72, + 539, + 204 + ], + "type": "image", + "image_path": "0ab979193ce5715dcc7b650b9114e63d4a217e3ebcd07ca293810378aa73bd12.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + } + ], + "index": 3 + }, + { + "bbox": [ + 66, + 352, + 544, + 498 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 66, + 352, + 544, + 498 + ], + "spans": [ + { + "bbox": [ + 66, + 352, + 544, + 498 + ], + "type": "text", + "content": "We consider two settings. Initially, we focus exclusively on drug SMILES strings as the only feature contributing to clinical trial outcome, thereby isolating the influence of therapeutic information by excluding this additional context. 
To simulate data limitations, we fine-tuned TxGemma-27B-Predict and the baseline Gemma-2-27B on varying fractions of the training data, and then evaluated the newly fine-tuned models performance on the test set after 30 epochs of training (Figure 7). Overall, TxGemma-27B-Predict achieved higher AUROCs with lower amounts of training data, matching the performance of Gemma-2-27B with less than " + }, + { + "bbox": [ + 66, + 352, + 544, + 498 + ], + "type": "inline_equation", + "content": "10\\%" + }, + { + "bbox": [ + 66, + 352, + 544, + 498 + ], + "type": "text", + "content": " of retraining data. In the second setting, we explored the performance ceiling by incorporating textual information about the clinical trials, increasing the number of tokens provided to the models by a factor of 4 (Table S.10). This is the setting used by the best-in-class model for adverse event prediction [29]. The addition of textual information allowed our models to consistently outperform existing SOTA methods [29]. However, the performance difference between TxGemma-27B-Predict and Gemma-2-27B decreased in this scenario because the additional textual information diluted the relative importance of the drug SMILES strings." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 66, + 501, + 543, + 562 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 66, + 501, + 543, + 562 + ], + "spans": [ + { + "bbox": [ + 66, + 501, + 543, + 562 + ], + "type": "text", + "content": "TxGemma inference time is suitable for virtual screening In Figure S.11, we plot the inference speed of TxGemma models of all sizes normalized by the number of TPUv5e chips used for serving. All model sizes are suitably fast for virtual screening, as even the largest 27B model is able to inference around 9,000 samples per day per TPU chip. Using 64 chips for serving, this would yield around 600,000 samples per day for the 27B model, and the smallest 2B model would reach 3,000,000 samples per day." 
+ } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 66, + 566, + 543, + 627 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 66, + 566, + 543, + 627 + ], + "spans": [ + { + "bbox": [ + 66, + 566, + 543, + 627 + ], + "type": "text", + "content": "Correlation between clinical trial approval and toxicity predictions We investigated the correlation between TxGemma's clinical trial approval predictions (based on SMILES and target disease) and its toxicity predictions (using TDC's AMES, DILI, and hERG tasks). Figure S.18 shows a consistent, but weak (0.15-0.35), positive Spearman correlation across all phases. This suggests TxGemma associates lower predicted toxicity with approval, but may also consider other factors such as efficacy or drug-likeness." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 66, + 630, + 543, + 692 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 66, + 630, + 543, + 692 + ], + "spans": [ + { + "bbox": [ + 66, + 630, + 543, + 692 + ], + "type": "text", + "content": "Impact of feature types Figure S.16 presents a performance breakdown of TxGemma-27B-Predict by feature type, compared to Tx-LLM M. In both models, tasks incorporating both SMILES strings and textual features (e.g., disease names, cell line names/description) show the most significant improvement over SOTA. This suggests that the contextual knowledge acquired during LLM pretraining could aid in synthesizing textual information with molecular representations." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 67, + 695, + 543, + 720 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 695, + 543, + 720 + ], + "spans": [ + { + "bbox": [ + 67, + 695, + 543, + 720 + ], + "type": "text", + "content": "Model size and domain fine-tuning ablations Figure S.17 compares the performance of TxGemma-Predict models across different sizes (2B, 9B, and 27B) on TDC tasks. 
Pairwise comparisons using the Wilcoxon" + } + ] + } + ], + "index": 10 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 528, + 742, + 542, + 752 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 528, + 742, + 542, + 752 + ], + "spans": [ + { + "bbox": [ + 528, + 742, + 542, + 752 + ], + "type": "text", + "content": "14" + } + ] + } + ], + "index": 11 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 13 + }, + { + "para_blocks": [ + { + "bbox": [ + 67, + 71, + 543, + 145 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 71, + 543, + 145 + ], + "spans": [ + { + "bbox": [ + 67, + 71, + 543, + 145 + ], + "type": "text", + "content": "signed-rank test indicate that model size is a significant factor: TxGemma-27B-Predict outperforms TxGemma-9B-Predict " + }, + { + "bbox": [ + 67, + 71, + 543, + 145 + ], + "type": "inline_equation", + "content": "(p = 0.013)" + }, + { + "bbox": [ + 67, + 71, + 543, + 145 + ], + "type": "text", + "content": " and TxGemma-2B-Predict " + }, + { + "bbox": [ + 67, + 71, + 543, + 145 + ], + "type": "inline_equation", + "content": "(p = 6.2 \\times 10^{-6})" + }, + { + "bbox": [ + 67, + 71, + 543, + 145 + ], + "type": "text", + "content": ", and TxGemma-9B-Predict outperforms TxGemma-2B-Predict " + }, + { + "bbox": [ + 67, + 71, + 543, + 145 + ], + "type": "inline_equation", + "content": "(p = 0.048)" + }, + { + "bbox": [ + 67, + 71, + 543, + 145 + ], + "type": "text", + "content": ". Furthermore, comparing TxGemma models to their corresponding base Gemma-2 models reveals the significant impact of domain fine-tuning. All TxGemma models significantly outperform their Gemma-2 counterparts " + }, + { + "bbox": [ + 67, + 71, + 543, + 145 + ], + "type": "inline_equation", + "content": "(p < 10^{-10}" + }, + { + "bbox": [ + 67, + 71, + 543, + 145 + ], + "type": "text", + "content": ", Wilcoxon signed-rank test), underscoring the importance of specialized training for therapeutic tasks." 
+ } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 67, + 158, + 164, + 171 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 158, + 164, + 171 + ], + "spans": [ + { + "bbox": [ + 67, + 158, + 164, + 171 + ], + "type": "text", + "content": "4 Related work" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 67, + 181, + 543, + 255 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 181, + 543, + 255 + ], + "spans": [ + { + "bbox": [ + 67, + 181, + 543, + 255 + ], + "type": "text", + "content": "Task-specific models for chemistry and therapeutics. In recent years, there has been a surge in the development of deep learning models designed for various chemistry applications. Amongst those, graph neural networks (GNNs) have been applied for a wide variety of molecular prediction or generation tasks because small molecules are naturally represented as graphs [30, 31, 32, 33, 34, 35, 36, 37, 24]. Another common representation for small molecules is molecular fingerprints [38], which are binary vectors that capture the local environment of each atom [30, 39, 40]." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 67, + 258, + 542, + 308 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 258, + 542, + 308 + ], + "spans": [ + { + "bbox": [ + 67, + 258, + 542, + 308 + ], + "type": "text", + "content": "TxGNN trained a GNN on medical knowledge graphs in order to perform zero-shot drug repurposing for diseases with limited treatment options [41]. AlphaFold and its successors have also significantly advanced the field of protein structure prediction and protein design [42, 43, 44, 45, 46]. These models have been influential for both mechanistic research and the development of structure-based drugs [47]." 
+ } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 67, + 311, + 544, + 373 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 311, + 544, + 373 + ], + "spans": [ + { + "bbox": [ + 67, + 311, + 544, + 373 + ], + "type": "text", + "content": "Large language models for biology and chemistry. Transformer-based models [48] have fueled the development of LLMs, which are trained on massive textual datasets with subsequent instruction-tuning [49] or alignment [50]. LLMs have demonstrated exceptional proficiency in various tasks, including text summarization, translation, and question answering [16, 51, 52]. Their ability to encode vast amounts of information and generalize to new tasks has sparked considerable interest in their potential applications across diverse domains." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 67, + 375, + 543, + 509 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 375, + 543, + 509 + ], + "spans": [ + { + "bbox": [ + 67, + 375, + 543, + 509 + ], + "type": "text", + "content": "There has been increasing interest in applying the development for LLMs to scientific research. BrainGPT fine-tuned LLMs on neuroscience literature and found greater performance than domain experts [53]. LlaSMol fine-tuned LLMs on small molecule datasets and achieved near-SOTA performance on multiple tasks [23]. CLAMP used separate modules for natural language and molecular inputs, combining them together in a contrastive pre-training objective [54]. Protein language models [55, 56, 57, 58] and genomic language models [59, 60, 61] have used self-supervised pretraining to generate embeddings useful for downstream tasks. ProtLLM [62], BioT5 [63], and GraphToken [64] combine molecule or proteins with LLMs using textual or multi-modal strategies. 
Cellular foundation models such as scGPT [65], GenePT [66], Geneformer [67], Nicheformer [68], and Cell2Sentence [69] represent cells based on their gene expression to differentiate cell types and understand gene perturbations. NatureLM [70] trained a foundation model that represents small molecules, proteins, RNA, and materials as sequences over a wide variety of scientific tasks." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 67, + 512, + 543, + 694 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 512, + 543, + 694 + ], + "spans": [ + { + "bbox": [ + 67, + 512, + 543, + 694 + ], + "type": "text", + "content": "Agentic Systems. Unlike traditional passive models, agentic systems proactively choose actions to achieve goals [71, 72, 73, 74, 75], involving planning [76, 77, 78, 79, 80] and interaction with external tools [81, 82, 83, 84]. LLMs have enabled such systems by processing complex information and generating action-driving responses. The ReAct framework [22] combines reasoning, action, and observation, with variations incorporating self-reflection [85] or model architectures for internal tool usage [82]. Agentic frameworks enable automating tasks like software development [73, 86, 87, 88] and scientific research [89, 90, 91] including biomedical applications such as nanobody design [90], drug discovery [92], or reaction optimization [93]. ChemCrow [92] is an agent designed to perform chemistry experiments in drug discovery and materials design. The coscientist by Boiko et al. [93] designs and performs chemical experiments by integrating web knowledge, code execution, and experiment automation, demonstrating successful reaction optimization of palladium-catalysed cross-couplings. The multi-agent system AI co-scientist [88] is designed for hypothesis generation over a variety of scientific fields. 
TxAgent was developed as an agentic framework that provides multi-step reasoning and tool use aimed towards therapeutic applications, processing clinical information to support tasks like treatment recommendation [94]. In contrast to recommending existing therapeutics, Agentic-Tx generally focuses on developing new therapeutics." + } + ] + } + ], + "index": 6 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 528, + 742, + 541, + 752 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 528, + 742, + 541, + 752 + ], + "spans": [ + { + "bbox": [ + 528, + 742, + 541, + 752 + ], + "type": "text", + "content": "15" + } + ] + } + ], + "index": 7 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 14 + }, + { + "para_blocks": [ + { + "bbox": [ + 69, + 71, + 146, + 84 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 71, + 146, + 84 + ], + "spans": [ + { + "bbox": [ + 69, + 71, + 146, + 84 + ], + "type": "text", + "content": "5 Discussion" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 69, + 95, + 541, + 190 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 95, + 541, + 190 + ], + "spans": [ + { + "bbox": [ + 69, + 95, + 541, + 190 + ], + "type": "text", + "content": "TxGemma's performance suggests a paradigm shift in therapeutic AI development, demonstrating the viability of generalist LLMs. Despite the established dominance of specialist models in niche areas, TxGemma, a relatively lightweight and efficient generalist, achieves competitive results across a wide array of therapeutic tasks. This highlights the potential for broadly trained LLMs, such as those leveraging the comprehensive dataset Therapeutics Data Commons (TDC), to serve as powerful preliminary tools for hypothesis generation, information synthesis, and candidate prioritization. 
While specialist models would likely retain their value for complex, domain-specific challenges, future research should explore synergistic approaches that combine the strengths of both generalist and specialist therapeutic AI." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 69, + 195, + 541, + 327 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 195, + 541, + 327 + ], + "spans": [ + { + "bbox": [ + 69, + 195, + 541, + 327 + ], + "type": "text", + "content": "A significant advancement with TxGemma-Chat is its ability to provide reasoning for its predictions, a first in therapeutic AI and a feature lost in TxGemma-Predict, likely due to \"catastrophic forgetting\" [95]. While explainability may introduce a small trade-off in raw predictive power, it provides a crucial window into the model's decision-making, a factor of paramount importance in therapeutic development. For instance, explaining blood-brain barrier permeability based on molecular structure provides valuable insights for medicinal chemists. Beyond its research applications, TxGemma-Chat holds a significant educational potential, enabling students and researchers to explore complex therapeutic concepts. At the same time, it is important to acknowledge that provided explanations are correlations, not necessarily causal, and must be interpreted with caution. The model's occasional inability to explain certain predictions reveals its knowledge boundaries. Future research should prioritize improving reliability and comprehensive explanations. Even with current limitations, TxGemma-Chat represents an important improvement over the \"black-box\" paradigm." 
+ } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 69, + 332, + 541, + 441 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 332, + 541, + 441 + ], + "spans": [ + { + "bbox": [ + 69, + 332, + 541, + 441 + ], + "type": "text", + "content": "Expanding beyond single-step predictions, Agentic-Tx demonstrates the potential for LLMs to orchestrate complex workflows. By integrating TxGemma with a suite of external tools (PubMed, Wikipedia, chemical databases, etc), Agentic-Tx can tackle multi-step reasoning tasks that would be difficult for a standalone LLM. Its strong performance on benchmarks like ChemBench Chemical Preference and Humanity's Last Exam (HLE) highlights the synergistic value of integrating domain-specific knowledge from TxGemma with general reasoning and information retrieval. This modular, tool-based design further ensures flexibility and extensibility, allowing for future integration of new tools and data. Importantly, it solves the issue of knowledge cut-off in LLMs by providing access to up-to-date information. Agentic-Tx with its autonomous and collaborative operation is a powerful asset for augmenting researchers and advancing therapeutic development." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 69, + 445, + 541, + 517 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 445, + 541, + 517 + ], + "spans": [ + { + "bbox": [ + 69, + 445, + 541, + 517 + ], + "type": "text", + "content": "The data efficiency of TxGemma is clearly demonstrated in fine-tuning experiments on TrialBench. It achieves robust performance on novel tasks with substantially less training data compared to baseline models, showcasing the benefits of pre-training on a broad and diverse dataset like TDC. This efficiency is particularly critical in therapeutic domains, where data is often proprietary and limited. 
Moreover, our finding that adding textual context, while improving overall results, can dilute the influence of molecular representations emphasizes the importance of balancing the benefits of additional information with strategic feature selection." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 69, + 521, + 541, + 618 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 521, + 541, + 618 + ], + "spans": [ + { + "bbox": [ + 69, + 521, + 541, + 618 + ], + "type": "text", + "content": "Although our in-silico results across a diverse range of therapeutic tasks are highly encouraging, we acknowledge that TxGemma's performance has not yet been validated in real-world, wet-lab experiments. Prospective validation in these settings represents a crucial next step. However, a cornerstone of this work is our commitment to open model release. By making TxGemma readily accessible to the research community, we aim to facilitate its rigorous validation and adaptation. Researchers can tailor TxGemma to their specific datasets, encompassing tasks and distribution shifts beyond the scope of TDC. Given the predominantly proprietary nature of therapeutic data, we believe this collaborative, community-driven approach is essential for translating TxGemma into tangible therapeutic applications" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 69, + 632, + 148, + 645 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 632, + 148, + 645 + ], + "spans": [ + { + "bbox": [ + 69, + 632, + 148, + 645 + ], + "type": "text", + "content": "6 Conclusion" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 69, + 656, + 541, + 716 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 656, + 541, + 716 + ], + "spans": [ + { + "bbox": [ + 69, + 656, + 541, + 716 + ], + "type": "text", + "content": "In conclusion, this work introduced TxGemma, a suite of efficient, generalist LLMs designed to improve therapeutic development. 
By leveraging extensive therapeutic instruction-tuning datasets and building upon the foundation of Gemma-2, TxGemma achieves exceptional performance across a wide range of predictive and generative therapeutic tasks, surpassing or matching both generalist and specialist state-of-the-art models. Notably, TxGemma's conversational counterparts, a first in therapeutic AI, provide reasoning and explanations," + } + ] + } + ], + "index": 7 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 528, + 742, + 541, + 751 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 528, + 742, + 541, + 751 + ], + "spans": [ + { + "bbox": [ + 528, + 742, + 541, + 751 + ], + "type": "text", + "content": "16" + } + ] + } + ], + "index": 8 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 15 + }, + { + "para_blocks": [ + { + "bbox": [ + 67, + 72, + 542, + 156 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 72, + 542, + 156 + ], + "spans": [ + { + "bbox": [ + 67, + 72, + 542, + 156 + ], + "type": "text", + "content": "moving beyond traditional black-box predictions to facilitate mechanistic understanding and scientific discourse. Furthermore, the integration of TxGemma into an agentic system, Agentic-Tx, demonstrates its capacity to solve complex, multi-step problems, achieving state-of-the-art results on challenging reasoning-intensive tasks. Finally, and critically, the open release of TxGemma empowers the research community and scientist to adapt and refine the models on their own private data, potentially leading to significant advancements in drug discovery and development. Through these contributions, TxGemma represents a meaningful step towards more efficient, transparent, and collaborative AI-driven therapeutic research." 
+ } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 67, + 180, + 162, + 192 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 180, + 162, + 192 + ], + "spans": [ + { + "bbox": [ + 67, + 180, + 162, + 192 + ], + "type": "text", + "content": "Acknowledgments" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 67, + 200, + 543, + 321 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 200, + 543, + 321 + ], + "spans": [ + { + "bbox": [ + 67, + 200, + 543, + 321 + ], + "type": "text", + "content": "This project was a collaboration between teams at Google DeepMind and Google Research. We thank Marcus Brubaker, David Belanger, Justin Chen, and David Steiner for the feedback and insight which significantly contributed to the enhancement of this report. We thank Tris Warkentin, Glenn Cameron, Victor Cotruta, Fereshteh Mahvar, Tiffany Chen, Omar Sansevier, Kathleen Kenealy, Joe Fernandez, Gus Martins, Nabila Babar, Sara Smoot, Antonia Paterson, Pankil Botadra, Metin Toksoz-Exley, Tim Thelin, Can \"John\" Kirmizi, and Fayaz Jamil for their collaborative efforts in enabling the open model launch of TxGemma. We also thank Phoebe Kirk, Rachelle Sico, Yun Liu, Anand Rao, Jon Small, Juanita Bawagan, Jane Park, Jenn Sturgeon, Fred Alcober, Samantha Heyman, Abhinav Das for their valuable insights and technical support. We are also grateful to Zoubin Ghahramani, Raia Hadsell, Avinatan Hassidim, Katherine Chou, Dale Webster, Jon Shlens, and Pushmeet Kohli for their support during the course of this project." 
+ } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 67, + 330, + 171, + 341 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 330, + 171, + 341 + ], + "spans": [ + { + "bbox": [ + 67, + 330, + 171, + 341 + ], + "type": "text", + "content": "Inclusion and ethics" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 67, + 350, + 542, + 411 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 350, + 542, + 411 + ], + "spans": [ + { + "bbox": [ + 67, + 350, + 542, + 411 + ], + "type": "text", + "content": "While AI offers transformative potential in drug discovery, ethical considerations and transparency remain crucial. Biases in training data can lead to inequities, highlighting the need for diverse datasets and explainable AI systems. Our model, while still in the research stage, highlights the continuous need for development and refinement in this field. We acknowledge the difficulty in explaining the inner workings of complex models, but remain dedicated to advancing research in this area." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 67, + 420, + 154, + 432 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 420, + 154, + 432 + ], + "spans": [ + { + "bbox": [ + 67, + 420, + 154, + 432 + ], + "type": "text", + "content": "Data availability" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 67, + 440, + 542, + 489 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 440, + 542, + 489 + ], + "spans": [ + { + "bbox": [ + 67, + 440, + 542, + 489 + ], + "type": "text", + "content": "The Therapeutics Data Commons (TDC) datasets used for developing, benchmarking, and evaluating TxGemma are publicly available on their website. The benchmarking datasets used in this study—Humanity's Last Exam (HLE), GPQA (Diamond), ChemBench, and TrialBench (Serious Adverse Event Prediction)—are all publicly available via their respective websites." 
+ } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 67, + 498, + 156, + 510 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 498, + 156, + 510 + ], + "spans": [ + { + "bbox": [ + 67, + 498, + 156, + 510 + ], + "type": "text", + "content": "Code availability" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 67, + 518, + 543, + 639 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 518, + 543, + 639 + ], + "spans": [ + { + "bbox": [ + 67, + 518, + 543, + 639 + ], + "type": "text", + "content": "All of the components used in this work are available publicly. For reproducibility, we have documented technical methods and data curation detail in depth, while keeping the paper accessible to clinical and general scientific audiences. Specifically, all the data needs to reproduce this work is publicly accessible to the community. TxGemma, a collection of lightweight state-of-the-art, open language models, are provided for researchers in three model size of 2B, 9B, and 27B and is accessible through Vertex AI Model Garden and Hugging Face. TxGemma's Github repository including supporting code and colab notebooks for quick start are also available at: https://github.com/google-gemini/gemma-cookbook/tree/main/TxGemma. We have specifically provided starter colabs for inference, fine-tuning, and exploring agentic capabilities. TxGemma remains a research model and requires refinement. We look forward to working with research partners, regulators, and providers to validate and explore safe onward uses of TxGemma." 
+ } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 67, + 647, + 180, + 659 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 647, + 180, + 659 + ], + "spans": [ + { + "bbox": [ + 67, + 647, + 180, + 659 + ], + "type": "text", + "content": "Author Contributions" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 67, + 668, + 542, + 717 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 668, + 542, + 717 + ], + "spans": [ + { + "bbox": [ + 67, + 668, + 542, + 717 + ], + "type": "text", + "content": "E.W., S.S., and S.A. made substantial contributions to the conception, design, and evaluation of this work. They played a key role in data analysis, interpretation of results, and the drafting and revision of the manuscript. P.F.J. contributed to drafting and revision of the manuscript. F.Z. contributed to the data processing and model training in the manuscript. R.P. contributed to obtaining necessary legal approvals," + } + ] + } + ], + "index": 10 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 528, + 742, + 542, + 752 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 528, + 742, + 542, + 752 + ], + "spans": [ + { + "bbox": [ + 528, + 742, + 542, + 752 + ], + "type": "text", + "content": "17" + } + ] + } + ], + "index": 11 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 16 + }, + { + "para_blocks": [ + { + "bbox": [ + 67, + 72, + 542, + 97 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 72, + 542, + 97 + ], + "spans": [ + { + "bbox": [ + 67, + 72, + 542, + 97 + ], + "type": "text", + "content": "and organizational support. All authors participated in critically reviewing and revising the manuscript and interpreting the data and findings." 
+ } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 67, + 105, + 174, + 118 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 105, + 174, + 118 + ], + "spans": [ + { + "bbox": [ + 67, + 105, + 174, + 118 + ], + "type": "text", + "content": "Competing interests" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 67, + 125, + 543, + 162 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 125, + 543, + 162 + ], + "spans": [ + { + "bbox": [ + 67, + 125, + 543, + 162 + ], + "type": "text", + "content": "This study was funded by Alphabet Inc and/or a subsidiary thereof ('Alphabet'). E.W., S.S., P.F.J., F.Z., R.P., Y.M., J.B., D.F., and S.A. are employees of Alphabet and may own stock as part of the standard compensation package." + } + ] + } + ], + "index": 2 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 528, + 742, + 541, + 752 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 528, + 742, + 541, + 752 + ], + "spans": [ + { + "bbox": [ + 528, + 742, + 541, + 752 + ], + "type": "text", + "content": "18" + } + ] + } + ], + "index": 3 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 17 + }, + { + "para_blocks": [ + { + "bbox": [ + 74, + 72, + 140, + 83 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 74, + 72, + 140, + 83 + ], + "spans": [ + { + "bbox": [ + 74, + 72, + 140, + 83 + ], + "type": "text", + "content": "References" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 69, + 95, + 542, + 714 + ], + "type": "list", + "angle": 0, + "index": 31, + "blocks": [ + { + "bbox": [ + 72, + 95, + 542, + 114 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 72, + 95, + 542, + 114 + ], + "spans": [ + { + "bbox": [ + 72, + 95, + 542, + 114 + ], + "type": "text", + "content": "1. Mirza, A., Alampara, N., Kunchapu, S., Rios-Garcia, M., Emoekabu, B., Krishnan, A., Gupta, T., Schilling-Wilhelmi, M., Okereke, M., Aneesh, A., et al. 
Are large language models superhuman chemists? arXiv preprint arXiv:2404.01475 (2024)." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 72, + 116, + 542, + 136 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 72, + 116, + 542, + 136 + ], + "spans": [ + { + "bbox": [ + 72, + 116, + 542, + 136 + ], + "type": "text", + "content": "2. OpenAI. Learning to Reason with LLMs https://openai.com/index/learning-to-reason-with-llms/. Accessed: Wednesday 9th April, 2025. 2024." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 72, + 137, + 542, + 156 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 72, + 137, + 542, + 156 + ], + "spans": [ + { + "bbox": [ + 72, + 137, + 542, + 156 + ], + "type": "text", + "content": "3. Sun, D., Gao, W., Hu, H. & Zhou, S. Why " + }, + { + "bbox": [ + 72, + 137, + 542, + 156 + ], + "type": "inline_equation", + "content": "90\\%" + }, + { + "bbox": [ + 72, + 137, + 542, + 156 + ], + "type": "text", + "content": " of clinical drug development fails and how to improve it? Acta Pharmaceutica Sinica B 12, 3049-3062 (2022)." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 72, + 158, + 542, + 176 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 72, + 158, + 542, + 176 + ], + "spans": [ + { + "bbox": [ + 72, + 158, + 542, + 176 + ], + "type": "text", + "content": "4. Hinkson, I. V., Madej, B. & Stahlberg, E. A. Accelerating therapeutics for opportunities in medicine: a paradigm shift in drug discovery. Frontiers in pharmacology 11, 770 (2020)." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 72, + 178, + 542, + 196 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 72, + 178, + 542, + 196 + ], + "spans": [ + { + "bbox": [ + 72, + 178, + 542, + 196 + ], + "type": "text", + "content": "5. Kumar, A., Voet, A. & Zhang, K. Y. Fragment based drug design: from experimental to computational approaches. *Current medicinal chemistry* 19, 5128-5147 (2012)." 
+ } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 72, + 198, + 542, + 216 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 72, + 198, + 542, + 216 + ], + "spans": [ + { + "bbox": [ + 72, + 198, + 542, + 216 + ], + "type": "text", + "content": "6. Velez-Arce, A., Huang, K., Li, M. M., Lin, X., Gao, W., Fu, T., Kellis, M., Pentelute, B. L. & Zitnik, M. TDC-2: Multimodal foundation for therapeutic science. bioRxiv, 2024-06 (2024)." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 72, + 217, + 542, + 245 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 72, + 217, + 542, + 245 + ], + "spans": [ + { + "bbox": [ + 72, + 217, + 542, + 245 + ], + "type": "text", + "content": "7. Huang, K., Fu, T., Gao, W., Zhao, Y., Roohani, Y., Leskovec, J., Coley, C. W., Xiao, C., Sun, J. & Zitnik, M. Therapeutics data commons: Machine learning datasets and tasks for drug discovery and development. arXiv preprint arXiv:2102.09548 (2021)." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 72, + 247, + 542, + 265 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 72, + 247, + 542, + 265 + ], + "spans": [ + { + "bbox": [ + 72, + 247, + 542, + 265 + ], + "type": "text", + "content": "8. Huang, K., Fu, T., Gao, W., Zhao, Y., Roohani, Y., Leskovec, J., Coley, C. W., Xiao, C., Sun, J. & Zitnik, M. Artificial intelligence foundation for therapeutic science. Nature chemical biology 18, 1033-1036 (2022)." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 72, + 266, + 542, + 285 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 72, + 266, + 542, + 285 + ], + "spans": [ + { + "bbox": [ + 72, + 266, + 542, + 285 + ], + "type": "text", + "content": "9. Bubeck, S., Chandrasekaran, V., Eldan, R., Gehrke, J., Horvitz, E., Kamar, E., Lee, P., Lee, Y. T., Li, Y., Lundberg, S., et al. Sparks of artificial general intelligence: Early experiments with GPT-4. arXiv preprint arXiv:2303.12712 (2023)." 
+ } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 69, + 286, + 542, + 304 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 286, + 542, + 304 + ], + "spans": [ + { + "bbox": [ + 69, + 286, + 542, + 304 + ], + "type": "text", + "content": "10. Taylor, R., Kardas, M., Cucurull, G., Scialom, T., Hartshorn, A., Saravia, E., Poulton, A., Kerkez, V. & Stojnic, R. Galactica: A large language model for science. arXiv preprint arXiv:2211.09085 (2022)." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 69, + 306, + 542, + 325 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 306, + 542, + 325 + ], + "spans": [ + { + "bbox": [ + 69, + 306, + 542, + 325 + ], + "type": "text", + "content": "11. Telenti, A., Auli, M., Hie, B. L., Maher, C., Saria, S. & Ioannidis, J. P. Large language models for science and medicine. European journal of clinical investigation 54, e14183 (2024)." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 69, + 327, + 542, + 345 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 327, + 542, + 345 + ], + "spans": [ + { + "bbox": [ + 69, + 327, + 542, + 345 + ], + "type": "text", + "content": "12. Chaves, J. M. Z., Wang, E., Tu, T., Vaishnav, E. D., Lee, B., Mahdavi, S. S., Semturs, C., Fleet, D., Natarajan, V. & Azizi, S. Tx-LLM: A Large Language Model for Therapeutics. arXiv preprint arXiv:2406.06316 (2024)." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 69, + 346, + 542, + 365 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 346, + 542, + 365 + ], + "spans": [ + { + "bbox": [ + 69, + 346, + 542, + 365 + ], + "type": "text", + "content": "13. Team, G., Mesnard, T., Hardin, C., Dadashi, R., Bhupatiraju, S., Pathak, S., Sifre, L., Riviere, M., Kale, M. S., Love, J., et al. Gemma: Open models based on gemini research and technology. arXiv preprint arXiv:2403.08295 (2024)." 
+ } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 69, + 366, + 542, + 385 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 366, + 542, + 385 + ], + "spans": [ + { + "bbox": [ + 69, + 366, + 542, + 385 + ], + "type": "text", + "content": "14. Team, G., Riviere, M., Pathak, S., Sessa, P. G., Hardin, C., Bhupatiraju, S., Hussenot, L., Mesnard, T., Shahriari, B., Ramé, A., et al. Gemma 2: Improving open language models at a practical size. arXiv preprint arXiv:2408.00118 (2024)." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 69, + 387, + 542, + 405 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 387, + 542, + 405 + ], + "spans": [ + { + "bbox": [ + 69, + 387, + 542, + 405 + ], + "type": "text", + "content": "15. Phan, L., Gatti, A., Han, Z., Li, N., Hu, J., Zhang, H., Shi, S., Choi, M., Chopra, A., et al. Humanity's Last Exam. arXiv preprint arXiv:2501.14249 (2025)." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 69, + 407, + 542, + 425 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 407, + 542, + 425 + ], + "spans": [ + { + "bbox": [ + 69, + 407, + 542, + 425 + ], + "type": "text", + "content": "16. Brown, T., Mann, B., Ryder, N., Subbiah, M., Kaplan, J. D., Dhariwal, P., Neelakantan, A., Shyam, P., Sastry, G., Askell, A., et al. Language models are few-shot learners. Advances in neural information processing systems 33, 1877-1901 (2020)." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 69, + 426, + 542, + 453 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 426, + 542, + 453 + ], + "spans": [ + { + "bbox": [ + 69, + 426, + 542, + 453 + ], + "type": "text", + "content": "17. Longpre, S., Hou, L., Vu, T., Webson, A., Chung, H. W., Tay, Y., Zhou, D., Le, Q. V., Zoph, B., Wei, J., et al. 
The FLAN collection: Designing data and methods for effective instruction tuning in International Conference on Machine Learning (2023), 22631-22648." + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 69, + 455, + 542, + 474 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 455, + 542, + 474 + ], + "spans": [ + { + "bbox": [ + 69, + 455, + 542, + 474 + ], + "type": "text", + "content": "18. Team, G., Anil, R., Borgeaud, S., Alayrac, J.-B., Yu, J., Soricut, R., Schalkwyk, J., Dai, A. M., Hauth, A., Millican, K., et al. Gemini: a family of highly capable multimodal models. arXiv preprint arXiv:2312.11805 (2023)." + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 69, + 475, + 542, + 494 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 475, + 542, + 494 + ], + "spans": [ + { + "bbox": [ + 69, + 475, + 542, + 494 + ], + "type": "text", + "content": "19. Landrum, G. RDKit: Open-Source Cheminformatics Software. https://github.com/rdkit/rdkit/releases/tag/Release_2016_09_4 (2016)." + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 69, + 495, + 375, + 506 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 495, + 375, + 506 + ], + "spans": [ + { + "bbox": [ + 69, + 495, + 375, + 506 + ], + "type": "text", + "content": "20. Dalke, A. The chemfp project. Journal of cheminformatics 11, 1-21 (2019)." + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 69, + 507, + 542, + 533 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 507, + 542, + 533 + ], + "spans": [ + { + "bbox": [ + 69, + 507, + 542, + 533 + ], + "type": "text", + "content": "21. Sievers, F., Wilm, A., Dineen, D., Gibson, T. J., Karplus, K., Li, W., Lopez, R., McWilliam, H., Remmert, M., Söding, J., et al. Fast, scalable generation of high-quality protein multiple sequence alignments using Clustal Omega. Molecular systems biology 7, 539 (2011)." 
+ } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 69, + 535, + 542, + 553 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 535, + 542, + 553 + ], + "spans": [ + { + "bbox": [ + 69, + 535, + 542, + 553 + ], + "type": "text", + "content": "22. Yao, S., Zhao, J., Yu, D., Du, N., Shafran, I., Narasimhan, K. & Cao, Y. React: Synergizing reasoning and acting in language models. arXiv preprint arXiv:2210.03629 (2022)." + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 69, + 555, + 542, + 574 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 555, + 542, + 574 + ], + "spans": [ + { + "bbox": [ + 69, + 555, + 542, + 574 + ], + "type": "text", + "content": "23. Yu, B., Baker, F. N., Chen, Z., Ning, X. & Sun, H. Llasmol: Advancing large language models for chemistry with a large-scale, comprehensive, high-quality instruction tuning dataset. arXiv preprint arXiv:2402.09391 (2024)." + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 69, + 575, + 542, + 594 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 575, + 542, + 594 + ], + "spans": [ + { + "bbox": [ + 69, + 575, + 542, + 594 + ], + "type": "text", + "content": "24. Mendez-Lucio, O., Nicolaou, C. A. & Earnshaw, B. MolE: a foundation model for molecular graphs using disentangled attention. Nature Communications 15, 9431 (2024)." + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 69, + 595, + 326, + 605 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 595, + 326, + 605 + ], + "spans": [ + { + "bbox": [ + 69, + 595, + 326, + 605 + ], + "type": "text", + "content": "25. Team, G. Gemma 3 technical report. Google DeepMind (2025)." + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 69, + 607, + 542, + 625 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 607, + 542, + 625 + ], + "spans": [ + { + "bbox": [ + 69, + 607, + 542, + 625 + ], + "type": "text", + "content": "26. 
Hendrycks, D., Burns, C., Basart, S., Zou, A., Mazeika, M., Song, D. & Steinhardt, J. Measuring massive multitask language understanding. arXiv preprint arXiv:2009.03300 (2020)." + } + ] + } + ], + "index": 26 + }, + { + "bbox": [ + 69, + 627, + 542, + 645 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 627, + 542, + 645 + ], + "spans": [ + { + "bbox": [ + 69, + 627, + 542, + 645 + ], + "type": "text", + "content": "27. Rein, D., Hou, B. L., Stickland, A. C., Petty, J., Pang, R. Y., Dirani, J., Michael, J. & Bowman, S. R. Gpqa: A graduate-level google-proof q@a benchmark in First Conference on Language Modeling (2024)." + } + ] + } + ], + "index": 27 + }, + { + "bbox": [ + 69, + 647, + 542, + 673 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 647, + 542, + 673 + ], + "spans": [ + { + "bbox": [ + 69, + 647, + 542, + 673 + ], + "type": "text", + "content": "28. Skarlinski, M. D., Cox, S., Laurent, J. M., Braza, J. D., Hinks, M., Hammerling, M. J., Ponnapati, M., Rodriques, S. G. & White, A. D. Language agents achieve superhuman synthesis of scientific knowledge. arXiv preprint arXiv:2409.13740 (2024)." + } + ] + } + ], + "index": 28 + }, + { + "bbox": [ + 69, + 675, + 542, + 693 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 675, + 542, + 693 + ], + "spans": [ + { + "bbox": [ + 69, + 675, + 542, + 693 + ], + "type": "text", + "content": "29. Chen, J., Hu, Y., Wang, Y., Lu, Y., Cao, X., Lin, M., Xu, H., Wu, J., Xiao, C., Sun, J., et al. TrialBench: Multi-modal artificial intelligence-ready clinical trial datasets. arXiv preprint arXiv:2407.00631 (2024)." + } + ] + } + ], + "index": 29 + }, + { + "bbox": [ + 69, + 695, + 542, + 714 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 695, + 542, + 714 + ], + "spans": [ + { + "bbox": [ + 69, + 695, + 542, + 714 + ], + "type": "text", + "content": "30. Torng, W. & Altman, R. B. 
Graph convolutional neural networks for predicting drug-target interactions. Journal of chemical information and modeling 59, 4131-4149 (2019)." + } + ] + } + ], + "index": 30 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 528, + 742, + 541, + 751 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 528, + 742, + 541, + 751 + ], + "spans": [ + { + "bbox": [ + 528, + 742, + 541, + 751 + ], + "type": "text", + "content": "19" + } + ] + } + ], + "index": 32 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 18 + }, + { + "para_blocks": [ + { + "bbox": [ + 67, + 72, + 543, + 708 + ], + "type": "list", + "angle": 0, + "index": 29, + "blocks": [ + { + "bbox": [ + 67, + 72, + 543, + 91 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 72, + 543, + 91 + ], + "spans": [ + { + "bbox": [ + 67, + 72, + 543, + 91 + ], + "type": "text", + "content": "31. Stärk, H., Ganea, O., Pattanaik, L., Barzilay, R. & Jaakkola, T. Equibind: Geometric deep learning for drug binding structure prediction in International conference on machine learning (2022), 20503-20521." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 67, + 92, + 543, + 120 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 92, + 543, + 120 + ], + "spans": [ + { + "bbox": [ + 67, + 92, + 543, + 120 + ], + "type": "text", + "content": "32. Xiong, Z., Wang, D., Liu, X., Zhong, F., Wan, X., Li, X., Li, Z., Luo, X., Chen, K., Jiang, H., et al. Pushing the boundaries of molecular representation for drug discovery with the graph attention mechanism. Journal of medicinal chemistry 63, 8749-8760 (2019)." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 67, + 121, + 542, + 140 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 121, + 542, + 140 + ], + "spans": [ + { + "bbox": [ + 67, + 121, + 542, + 140 + ], + "type": "text", + "content": "33. Heid, E. & Green, W. H. 
Machine learning of reaction properties via learned representations of the condensed graph of reaction. Journal of Chemical Information and Modeling 62, 2101-2110 (2021)." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 67, + 141, + 542, + 168 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 141, + 542, + 168 + ], + "spans": [ + { + "bbox": [ + 67, + 141, + 542, + 168 + ], + "type": "text", + "content": "34. Yang, K., Swanson, K., Jin, W., Coley, C., Eiden, P., Gao, H., Guzman-Perez, A., Hopper, T., Kelley, B., Mathea, M., et al. Analyzing learned molecular representations for property prediction. Journal of chemical information and modeling 59, 3370-3388 (2019)." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 67, + 170, + 542, + 198 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 170, + 542, + 198 + ], + "spans": [ + { + "bbox": [ + 67, + 170, + 542, + 198 + ], + "type": "text", + "content": "35. Morrone, J. A., Weber, J. K., Huynh, T., Luo, H. & Cornell, W. D. Combining docking pose rank and structure with deep learning improves protein-ligand binding mode prediction over a baseline docking approach. Journal of chemical information and modeling 60, 4170-4179 (2020)." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 67, + 198, + 542, + 217 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 198, + 542, + 217 + ], + "spans": [ + { + "bbox": [ + 67, + 198, + 542, + 217 + ], + "type": "text", + "content": "36. Mohr, B., Shmilovich, K., Kleinwächter, I. S., Schneider, D., Ferguson, A. L. & Bereau, T. Data-driven discovery of cardiolipin-selective small molecules by computational active learning. Chemical Science 13, 4498-4511 (2022)." 
+ } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 67, + 218, + 542, + 237 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 218, + 542, + 237 + ], + "spans": [ + { + "bbox": [ + 67, + 218, + 542, + 237 + ], + "type": "text", + "content": "37. Stokes, J. M., Yang, K., Swanson, K., Jin, W., Cubillos-Ruiz, A., Donghia, N. M., MacNair, C. R., French, S., Carfrae, L. A., Bloom-Ackermann, Z., et al. A deep learning approach to antibiotic discovery. Cell 180, 688-702 (2020)." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 67, + 238, + 542, + 257 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 238, + 542, + 257 + ], + "spans": [ + { + "bbox": [ + 67, + 238, + 542, + 257 + ], + "type": "text", + "content": "38. Rogers, D. & Hahn, M. Extended-connectivity fingerprints. Journal of chemical information and modeling 50, 742-754 (2010)." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 67, + 258, + 542, + 286 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 258, + 542, + 286 + ], + "spans": [ + { + "bbox": [ + 67, + 258, + 542, + 286 + ], + "type": "text", + "content": "39. Tayyebi, A., Alshami, A. S., Rabiei, Z., Yu, X., Ismail, N., Talukder, M. J. & Power, J. Prediction of organic compound aqueous solubility using machine learning: a comparison study of descriptor-based and fingerprints-based models. Journal of Cheminformatics 15, 99 (2023)." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 67, + 287, + 542, + 306 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 287, + 542, + 306 + ], + "spans": [ + { + "bbox": [ + 67, + 287, + 542, + 306 + ], + "type": "text", + "content": "40. Belenahalli Shekarappa, S., Kandagalla, S. & Lee, J. Development of machine learning models based on molecular fingerprints for selection of small molecule inhibitors against JAK2 protein. Journal of Computational Chemistry 44, 1493-1504 (2023)." 
+ } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 67, + 307, + 542, + 326 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 307, + 542, + 326 + ], + "spans": [ + { + "bbox": [ + 67, + 307, + 542, + 326 + ], + "type": "text", + "content": "41. Huang, K., Chandak, P., Wang, Q., Havaldar, S., Vaid, A., Leskovec, J., Nadkarni, G. N., Glicksberg, B. S., Gehlenborg, N. & Zitnik, M. A foundation model for clinician-centered drug repurposing. Nature Medicine, 1-13 (2024)." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 67, + 327, + 542, + 348 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 327, + 542, + 348 + ], + "spans": [ + { + "bbox": [ + 67, + 327, + 542, + 348 + ], + "type": "text", + "content": "42. Jumper, J., Evans, R., Pritzel, A., Green, T., Figurnov, M., Ronneberger, O., Tunyasuvunakool, K., Bates, R., Zidek, A., Potapenko, A., et al. Highly accurate protein structure prediction with AlphaFold. nature 596, 583-589 (2021)." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 67, + 350, + 542, + 369 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 350, + 542, + 369 + ], + "spans": [ + { + "bbox": [ + 67, + 350, + 542, + 369 + ], + "type": "text", + "content": "43. Tunyasuvunakool, K., Adler, J., Wu, Z., Green, T., Zielinski, M., Žídek, A., Bridgland, A., Cowie, A., Meyer, C., Laydon, A., et al. Highly accurate protein structure prediction for the human proteome. Nature 596, 590-596 (2021)." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 67, + 371, + 542, + 392 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 371, + 542, + 392 + ], + "spans": [ + { + "bbox": [ + 67, + 371, + 542, + 392 + ], + "type": "text", + "content": "44. Senior, A. W., Evans, R., Jumper, J., Kirkpatrick, J., Sifre, L., Green, T., Qin, C., Zidek, A., Nelson, A. W., Bridgland, A., et al. Improved protein structure prediction using potentials from deep learning. 
Nature 577, 706-710 (2020)." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 67, + 392, + 542, + 411 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 392, + 542, + 411 + ], + "spans": [ + { + "bbox": [ + 67, + 392, + 542, + 411 + ], + "type": "text", + "content": "45. Abramson, J., Adler, J., Dunger, J., Evans, R., Green, T., Pritzel, A., Ronneberger, O., Willmore, L., Ballard, A. J., Bambrick, J., et al. Accurate structure prediction of biomolecular interactions with AlphaFold 3. Nature, 1-3 (2024)." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 67, + 411, + 542, + 439 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 411, + 542, + 439 + ], + "spans": [ + { + "bbox": [ + 67, + 411, + 542, + 439 + ], + "type": "text", + "content": "46. Zambaldi, V., La, D., Chu, A. E., Patani, H., Danson, A. E., Kwan, T. O., Frerix, T., Schneider, R. G., Saxton, D., Thillaisundaram, A., et al. De novo design of high-affinity protein binders with AlphaProteo. arXiv preprint arXiv:2409.08022 (2024)." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 67, + 441, + 542, + 468 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 441, + 542, + 468 + ], + "spans": [ + { + "bbox": [ + 67, + 441, + 542, + 468 + ], + "type": "text", + "content": "47. Ren, F., Ding, X., Zheng, M., Korzinkin, M., Cai, X., Zhu, W., Mantsyzov, A., Aliper, A., Aladinskiy, V., Cao, Z., et al. AlphaFold accelerates artificial intelligence powered drug discovery: efficient discovery of a novel CDK20 small molecule inhibitor. Chemical science 14, 1443-1452 (2023)." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 67, + 469, + 460, + 480 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 469, + 460, + 480 + ], + "spans": [ + { + "bbox": [ + 67, + 469, + 460, + 480 + ], + "type": "text", + "content": "48. Vaswani, A. Attention is all you need. 
Advances in Neural Information Processing Systems (2017)." + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 67, + 481, + 542, + 499 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 481, + 542, + 499 + ], + "spans": [ + { + "bbox": [ + 67, + 481, + 542, + 499 + ], + "type": "text", + "content": "49. Zhang, S., Dong, L., Li, X., Zhang, S., Sun, X., Wang, S., Li, J., Hu, R., Zhang, T., Wu, F., et al. Instruction tuning for large language models: A survey. arXiv preprint arXiv:2308.10792 (2023)." + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 67, + 500, + 542, + 519 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 500, + 542, + 519 + ], + "spans": [ + { + "bbox": [ + 67, + 500, + 542, + 519 + ], + "type": "text", + "content": "50. Kaufmann, T., Weng, P., Bengs, V. & Hüllermeier, E. A survey of reinforcement learning from human feedback. arXiv preprint arXiv:2312.14925 (2023)." + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 67, + 521, + 501, + 531 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 521, + 501, + 531 + ], + "spans": [ + { + "bbox": [ + 67, + 521, + 501, + 531 + ], + "type": "text", + "content": "51. Liu, Y. & Lapata, M. Text summarization with pretrained encoders. arXiv preprint arXiv:1908.08345 (2019)." + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 67, + 532, + 542, + 551 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 532, + 542, + 551 + ], + "spans": [ + { + "bbox": [ + 67, + 532, + 542, + 551 + ], + "type": "text", + "content": "52. Kenton, J. D. M.-W. C. & Toutanova, L. K. BERT: Pre-training of deep bidirectional transformers for language understanding in Proceedings of naacL-HLT 1 (2019)." 
+ } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 67, + 552, + 542, + 579 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 552, + 542, + 579 + ], + "spans": [ + { + "bbox": [ + 67, + 552, + 542, + 579 + ], + "type": "text", + "content": "53. Luo, X., Rechardt, A., Sun, G., Nejad, K. K., Yáñez, F., Yilmaz, B., Lee, K., Cohen, A. O., Borghesani, V., Pashkov, A., et al. Large language models surpass human experts in predicting neuroscience results. Nature human behaviour, 1-11 (2024)." + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 67, + 581, + 542, + 599 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 581, + 542, + 599 + ], + "spans": [ + { + "bbox": [ + 67, + 581, + 542, + 599 + ], + "type": "text", + "content": "54. Seidl, P., Vall, A., Hochreiter, S. & Klambauer, G. Enhancing activity prediction models in drug discovery with the ability to understand human language in International Conference on Machine Learning (2023), 30458-30490." + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 67, + 600, + 542, + 628 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 600, + 542, + 628 + ], + "spans": [ + { + "bbox": [ + 67, + 600, + 542, + 628 + ], + "type": "text", + "content": "55. Rives, A., Meier, J., Sercu, T., Goyal, S., Lin, Z., Liu, J., Guo, D., Ott, M., Zitnick, C. L., Ma, J., et al. Biological structure and function emerge from scaling unsupervised learning to 250 million protein sequences. Proceedings of the National Academy of Sciences 118, e2016239118 (2021)." + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 67, + 629, + 542, + 648 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 629, + 542, + 648 + ], + "spans": [ + { + "bbox": [ + 67, + 629, + 542, + 648 + ], + "type": "text", + "content": "56. Lin, Z., Akin, H., Rao, R., Hie, B., Zhu, Z., Lu, W., Smetanin, N., Verkuil, R., Kabeli, O., Shmueli, Y., et al. 
Evolutionary-scale prediction of atomic-level protein structure with a language model. Science 379, 1123-1130 (2023)." + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 67, + 649, + 542, + 668 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 649, + 542, + 668 + ], + "spans": [ + { + "bbox": [ + 67, + 649, + 542, + 668 + ], + "type": "text", + "content": "57. Alley, E. C., Khimulya, G., Biswas, S., AlQuraishi, M. & Church, G. M. Unified rational protein engineering with sequence-based deep representation learning. Nature methods 16, 1315-1322 (2019)." + } + ] + } + ], + "index": 26 + }, + { + "bbox": [ + 67, + 669, + 542, + 688 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 669, + 542, + 688 + ], + "spans": [ + { + "bbox": [ + 67, + 669, + 542, + 688 + ], + "type": "text", + "content": "58. Ferruz, N., Schmidt, S. & Höcker, B. ProtGPT2 is a deep unsupervised language model for protein design. Nature communications 13, 4348 (2022)." + } + ] + } + ], + "index": 27 + }, + { + "bbox": [ + 67, + 689, + 542, + 708 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 689, + 542, + 708 + ], + "spans": [ + { + "bbox": [ + 67, + 689, + 542, + 708 + ], + "type": "text", + "content": "59. Nguyen, E., Poli, M., Durrant, M. G., Kang, B., Katrekar, D., Li, D. B., Bartie, L. J., Thomas, A. W., King, S. H., Brixi, G., et al. Sequence modeling and design from molecular to genome scale with Evo. Science 386, eado9336 (2024)." 
+ } + ] + } + ], + "index": 28 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 528, + 742, + 541, + 751 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 528, + 742, + 541, + 751 + ], + "spans": [ + { + "bbox": [ + 528, + 742, + 541, + 751 + ], + "type": "text", + "content": "20" + } + ] + } + ], + "index": 30 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 19 + }, + { + "para_blocks": [ + { + "bbox": [ + 68, + 72, + 542, + 721 + ], + "type": "list", + "angle": 0, + "index": 32, + "blocks": [ + { + "bbox": [ + 68, + 72, + 542, + 100 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 72, + 542, + 100 + ], + "spans": [ + { + "bbox": [ + 68, + 72, + 542, + 100 + ], + "type": "text", + "content": "60. Dalla-Torre, H., Gonzalez, L., Mendoza-Revilla, J., Lopez Carranza, N., Grzywaczewski, A. H., Oteri, F., Dallago, C., Trop, E., de Almeida, B. P., Sirelkhatim, H., et al. Nucleotide Transformer: building and evaluating robust foundation models for human genomics. Nature Methods, 1-11 (2024)." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 68, + 101, + 542, + 120 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 101, + 542, + 120 + ], + "spans": [ + { + "bbox": [ + 68, + 101, + 542, + 120 + ], + "type": "text", + "content": "61. Cornman, A., West-Roberts, J., Camargo, A. P., Roux, S., Beracochea, M., Mirdita, M., Ovchinnikov, S. & Hwang, Y. The OMG dataset: An Open MetaGenomic corpus for mixed-modality genomic language modeling. bioRxiv, 2024-08 (2024)." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 68, + 121, + 541, + 140 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 121, + 541, + 140 + ], + "spans": [ + { + "bbox": [ + 68, + 121, + 541, + 140 + ], + "type": "text", + "content": "62. Zhuo, L., Chi, Z., Xu, M., Huang, H., Zheng, H., He, C., Mao, X.-L. & Zhang, W. 
Protllm: An interleaved protein-language llm with protein-as-word pre-training. arXiv preprint arXiv:2403.07920 (2024)." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 68, + 141, + 541, + 160 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 141, + 541, + 160 + ], + "spans": [ + { + "bbox": [ + 68, + 141, + 541, + 160 + ], + "type": "text", + "content": "63. Pei, Q., Zhang, W., Zhu, J., Wu, K., Gao, K., Wu, L., Xia, Y. & Yan, R. Biot5: Enriching cross-modal integration in biology with chemical knowledge and natural language associations. arXiv preprint arXiv:2310.07276 (2023)." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 68, + 161, + 541, + 179 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 161, + 541, + 179 + ], + "spans": [ + { + "bbox": [ + 68, + 161, + 541, + 179 + ], + "type": "text", + "content": "64. Anonymous. Parameter Efficient Graph Encoding for Large Language Models 2025. https://openreview.net/forum?id=RbcXV63ZJk." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 68, + 180, + 541, + 201 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 180, + 541, + 201 + ], + "spans": [ + { + "bbox": [ + 68, + 180, + 541, + 201 + ], + "type": "text", + "content": "65. Cui, H., Wang, C., Maan, H., Pang, K., Luo, F., Duan, N. & Wang, B. scGPT: toward building a foundation model for single-cell multi-omics using generative AI. Nature Methods, 1-11 (2024)." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 68, + 201, + 541, + 220 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 201, + 541, + 220 + ], + "spans": [ + { + "bbox": [ + 68, + 201, + 541, + 220 + ], + "type": "text", + "content": "66. Chen, Y. & Zou, J. GenePT: a simple but effective foundation model for genes and cells built from ChatGPT. bioRxiv, 2023-10 (2024)." 
+ } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 68, + 221, + 541, + 240 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 221, + 541, + 240 + ], + "spans": [ + { + "bbox": [ + 68, + 221, + 541, + 240 + ], + "type": "text", + "content": "67. Theodoris, C. V., Xiao, L., Chopra, A., Chaffin, M. D., Al Sayed, Z. R., Hill, M. C., Mantineo, H., Brydon, E. M., Zeng, Z., Liu, X. S., et al. Transfer learning enables predictions in network biology. Nature 618, 616-624 (2023)." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 68, + 241, + 541, + 260 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 241, + 541, + 260 + ], + "spans": [ + { + "bbox": [ + 68, + 241, + 541, + 260 + ], + "type": "text", + "content": "68. Schaar, A. C., Tejada-Lapuerta, A., Palla, G., Gutgesell, R., Halle, L., Minaeva, M., Vornholz, L., Dony, L., Drummer, F., Bahrami, M., et al. Nicheformer: a foundation model for single-cell and spatial omics. bioRxiv, 2024-04 (2024)." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 68, + 261, + 541, + 280 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 261, + 541, + 280 + ], + "spans": [ + { + "bbox": [ + 68, + 261, + 541, + 280 + ], + "type": "text", + "content": "69. Levine, D., Rizvi, S. A., Lévy, S., Pallikkavaliyaveetil, N., Zhang, D., Chen, X., Ghadermarzi, S., Wu, R., Zheng, Z., Vrkic, I., et al. Cell2Sentence: teaching large language models the language of biology. BioRxiv, 2023-09 (2023)." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 68, + 281, + 541, + 300 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 281, + 541, + 300 + ], + "spans": [ + { + "bbox": [ + 68, + 281, + 541, + 300 + ], + "type": "text", + "content": "70. Xia, Y., Jin, P., Xie, S., He, L., Cao, C., Luo, R., Liu, G., Wang, Y., Liu, Z., Chen, Y.-J., et al. NatureLM: Deciphering the Language of Nature for Scientific Discovery. 
arXiv preprint arXiv:2502.07527 (2025)." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 68, + 301, + 541, + 320 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 301, + 541, + 320 + ], + "spans": [ + { + "bbox": [ + 68, + 301, + 541, + 320 + ], + "type": "text", + "content": "71. Wang, L., Ma, C., Feng, X., Zhang, Z., Yang, H., Zhang, J., Chen, Z., Tang, J., Chen, X., Lin, Y., et al. A survey on large language model based autonomous agents. Frontiers of Computer Science 18, 186345 (2024)." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 68, + 321, + 511, + 332 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 321, + 511, + 332 + ], + "spans": [ + { + "bbox": [ + 68, + 321, + 511, + 332 + ], + "type": "text", + "content": "72. Shanahan, M., McDonell, K. & Reynolds, L. Role play with large language models. Nature 623, 493-498 (2023)." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 68, + 333, + 541, + 352 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 333, + 541, + 352 + ], + "spans": [ + { + "bbox": [ + 68, + 333, + 541, + 352 + ], + "type": "text", + "content": "73. Qian, C., Cong, X., Yang, C., Chen, W., Su, Y., Xu, J., Liu, Z. & Sun, M. Communicative agents for software development. arXiv preprint arXiv:2307.07924 6 (2023)." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 68, + 353, + 541, + 372 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 353, + 541, + 372 + ], + "spans": [ + { + "bbox": [ + 68, + 353, + 541, + 372 + ], + "type": "text", + "content": "74. Hong, S., Zheng, X., Chen, J., Cheng, Y., Wang, J., Zhang, C., Wang, Z., Yau, S. K. S., Lin, Z., Zhou, L., et al. Metagpt: Meta programming for multi-agent collaborative framework. arXiv preprint arXiv:2308.00352 (2023)." 
+ } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 68, + 373, + 541, + 392 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 373, + 541, + 392 + ], + "spans": [ + { + "bbox": [ + 68, + 373, + 541, + 392 + ], + "type": "text", + "content": "75. Talebirad, Y. & Nadiri, A. Multi-agent collaboration: Harnessing the power of intelligent llm agents. arXiv preprint arXiv:2306.03314 (2023)." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 68, + 393, + 541, + 412 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 393, + 541, + 412 + ], + "spans": [ + { + "bbox": [ + 68, + 393, + 541, + 412 + ], + "type": "text", + "content": "76. Hao, S., Gu, Y., Ma, H., Hong, J. J., Wang, Z., Wang, D. Z. & Hu, Z. Reasoning with language model is planning with world model. arXiv preprint arXiv:2305.14992 (2023)." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 68, + 413, + 541, + 432 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 413, + 541, + 432 + ], + "spans": [ + { + "bbox": [ + 68, + 413, + 541, + 432 + ], + "type": "text", + "content": "77. Huang, W., Abbeel, P., Pathak, D. & Mordatch, I. Language models as zero-shot planners: Extracting actionable knowledge for embodied agents in International conference on machine learning (2022), 9118-9147." + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 68, + 433, + 541, + 460 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 433, + 541, + 460 + ], + "spans": [ + { + "bbox": [ + 68, + 433, + 541, + 460 + ], + "type": "text", + "content": "78. Song, C. H., Wu, J., Washington, C., Sadler, B. M., Chao, W.-L. & Su, Y. Lm-planner: Few-shot grounded planning for embodied agents with large language models in Proceedings of the IEEE/CVF International Conference on Computer Vision (2023), 2998-3009." 
+ } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 68, + 461, + 541, + 480 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 461, + 541, + 480 + ], + "spans": [ + { + "bbox": [ + 68, + 461, + 541, + 480 + ], + "type": "text", + "content": "79. Wang, Z., Cai, S., Chen, G., Liu, A., Ma, X. & Liang, Y. Describe, explain, plan and select: Interactive planning with large language models enables open-world multi-task agents. arXiv preprint arXiv:2302.01560 (2023)." + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 68, + 481, + 541, + 501 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 481, + 541, + 501 + ], + "spans": [ + { + "bbox": [ + 68, + 481, + 541, + 501 + ], + "type": "text", + "content": "80. Yao, S., Yu, D., Zhao, J., Shafran, I., Griffiths, T., Cao, Y. & Narasimhan, K. Tree of thoughts: Deliberate problem solving with large language models. Advances in Neural Information Processing Systems 36 (2024)." + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 68, + 502, + 520, + 512 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 502, + 520, + 512 + ], + "spans": [ + { + "bbox": [ + 68, + 502, + 520, + 512 + ], + "type": "text", + "content": "81. Parisi, A., Zhao, Y. & Fiedel, N. Talm: Tool augmented language models. arXiv preprint arXiv:2205.12255 (2022)." + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 68, + 513, + 542, + 540 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 513, + 542, + 540 + ], + "spans": [ + { + "bbox": [ + 68, + 513, + 542, + 540 + ], + "type": "text", + "content": "82. Schick, T., Dwivedi-Yu, J., Dessi', R., Raileanu, R., Lomeli, M., Hambro, E., Zettlemoyer, L., Cancedda, N. & Scialom, T. Toolformer: Language models can teach themselves to use tools. Advances in Neural Information Processing Systems 36, 68539-68551 (2023)." 
+ } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 68, + 542, + 541, + 560 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 542, + 541, + 560 + ], + "spans": [ + { + "bbox": [ + 68, + 542, + 541, + 560 + ], + "type": "text", + "content": "83. Qin, Y., Hu, S., Lin, Y., Chen, W., Ding, N., Cui, G., Zeng, Z., Zhou, X., Huang, Y., Xiao, C., et al. Tool learning with foundation models. ACM Computing Surveys 57, 1-40 (2024)." + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 68, + 561, + 541, + 580 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 561, + 541, + 580 + ], + "spans": [ + { + "bbox": [ + 68, + 561, + 541, + 580 + ], + "type": "text", + "content": "84. Cai, T., Wang, X., Ma, T., Chen, X. & Zhou, D. Large language models as tool makers. arXiv preprint arXiv:2305.17126 (2023)." + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 68, + 582, + 541, + 601 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 582, + 541, + 601 + ], + "spans": [ + { + "bbox": [ + 68, + 582, + 541, + 601 + ], + "type": "text", + "content": "85. Shinn, N., Cassano, F., Gopinath, A., Narasimhan, K. & Yao, S. Reflexion: Language agents with verbal reinforcement learning. Advances in Neural Information Processing Systems 36 (2024)." + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 68, + 601, + 541, + 620 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 601, + 541, + 620 + ], + "spans": [ + { + "bbox": [ + 68, + 601, + 541, + 620 + ], + "type": "text", + "content": "86. Yang, J., Jimenez, C. E., Wettig, A., Lieret, K., Yao, S., Narasimhan, K. & Press, O. Swe-agent: Agent-computer interfaces enable automated software engineering. arXiv preprint arXiv:2405.15793 (2024)." 
+ } + ] + } + ], + "index": 26 + }, + { + "bbox": [ + 68, + 621, + 541, + 641 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 621, + 541, + 641 + ], + "spans": [ + { + "bbox": [ + 68, + 621, + 541, + 641 + ], + "type": "text", + "content": "87. Qian, C., Dang, Y., Li, J., Liu, W., Chen, W., Yang, C., Liu, Z. & Sun, M. Experiential co-learning of software-developing agents. arXiv preprint arXiv:2312.17025 (2023)." + } + ] + } + ], + "index": 27 + }, + { + "bbox": [ + 68, + 642, + 541, + 661 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 642, + 541, + 661 + ], + "spans": [ + { + "bbox": [ + 68, + 642, + 541, + 661 + ], + "type": "text", + "content": "88. Gottweis, J., Weng, W.-H., Daryin, A., Tu, T., Palepu, A., Sirkovic, P., Myaskovsky, A., Weissenberger, F., Rong, K., Tanno, R., et al. Towards an AI co-scientist. arXiv preprint arXiv:2502.18864 (2025)." + } + ] + } + ], + "index": 28 + }, + { + "bbox": [ + 68, + 662, + 541, + 681 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 662, + 541, + 681 + ], + "spans": [ + { + "bbox": [ + 68, + 662, + 541, + 681 + ], + "type": "text", + "content": "89. Schmidgall, S., Su, Y., Wang, Z., Sun, X., Wu, J., Yu, X., Liu, J., Liu, Z. & Barsoum, E. Agent Laboratory: Using LLM Agents as Research Assistants. arXiv preprint arXiv:2501.04227 (2025)." + } + ] + } + ], + "index": 29 + }, + { + "bbox": [ + 68, + 681, + 541, + 700 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 681, + 541, + 700 + ], + "spans": [ + { + "bbox": [ + 68, + 681, + 541, + 700 + ], + "type": "text", + "content": "90. Swanson, K., Wu, W., Bulaong, N. L., Pak, J. E. & Zou, J. The virtual lab: Ai agents design new sars-cov-2 nanobodies with experimental validation. bioRxiv, 2024-11 (2024)." 
+ } + ] + } + ], + "index": 30 + }, + { + "bbox": [ + 68, + 702, + 541, + 721 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 702, + 541, + 721 + ], + "spans": [ + { + "bbox": [ + 68, + 702, + 541, + 721 + ], + "type": "text", + "content": "91. Lu, C., Lu, C., Lange, R. T., Foerster, J., Clune, J. & Ha, D. The ai scientist: Towards fully automated open-ended scientific discovery. arXiv preprint arXiv:2408.06292 (2024)." + } + ] + } + ], + "index": 31 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 528, + 742, + 540, + 751 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 528, + 742, + 540, + 751 + ], + "spans": [ + { + "bbox": [ + 528, + 742, + 540, + 751 + ], + "type": "text", + "content": "21" + } + ] + } + ], + "index": 33 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 20 + }, + { + "para_blocks": [ + { + "bbox": [ + 67, + 72, + 542, + 152 + ], + "type": "list", + "angle": 0, + "index": 4, + "blocks": [ + { + "bbox": [ + 67, + 72, + 542, + 91 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 72, + 542, + 91 + ], + "spans": [ + { + "bbox": [ + 67, + 72, + 542, + 91 + ], + "type": "text", + "content": "92. M. Bran, A., Cox, S., Schilter, O., Baldassari, C., White, A. D. & Schwaller, P. Augmenting large language models with chemistry tools. Nature Machine Intelligence, 1-11 (2024)." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 67, + 92, + 542, + 112 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 92, + 542, + 112 + ], + "spans": [ + { + "bbox": [ + 67, + 92, + 542, + 112 + ], + "type": "text", + "content": "93. Boiko, D. A., MacKnight, R., Kline, B. & Gomes, G. Autonomous chemical research with large language models. Nature 624, 570-578 (2023)." 
+ } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 67, + 113, + 542, + 132 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 113, + 542, + 132 + ], + "spans": [ + { + "bbox": [ + 67, + 113, + 542, + 132 + ], + "type": "text", + "content": "94. Gao, S., Zhu, R., Kong, Z., Noori, A., Su, X., Ginder, C., Tsiligkaridis, T. & Zitnik, M. TxAgent: An AI Agent for Therapeutic Reasoning Across a Universe of Tools. arXiv preprint arXiv:2503.10970 (2025)." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 67, + 133, + 542, + 152 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 133, + 542, + 152 + ], + "spans": [ + { + "bbox": [ + 67, + 133, + 542, + 152 + ], + "type": "text", + "content": "95. Aleixo, E. L., Colonna, J. G., Cristo, M. & Fernandes, E. Catastrophic forgetting in deep learning: A comprehensive taxonomy. arXiv preprint arXiv:2312.10549 (2023)." + } + ] + } + ], + "index": 3 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 528, + 742, + 541, + 751 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 528, + 742, + 541, + 751 + ], + "spans": [ + { + "bbox": [ + 528, + 742, + 541, + 751 + ], + "type": "text", + "content": "22" + } + ] + } + ], + "index": 5 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 21 + }, + { + "para_blocks": [ + { + "bbox": [ + 67, + 68, + 269, + 87 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 68, + 269, + 87 + ], + "spans": [ + { + "bbox": [ + 67, + 68, + 269, + 87 + ], + "type": "text", + "content": "Supplementary Material" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 67, + 100, + 161, + 113 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 100, + 161, + 113 + ], + "spans": [ + { + "bbox": [ + 67, + 100, + 161, + 113 + ], + "type": "text", + "content": "Version control" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 67, + 125, + 204, + 138 + ], + 
"type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 125, + 204, + 138 + ], + "spans": [ + { + "bbox": [ + 67, + 125, + 204, + 138 + ], + "type": "text", + "content": "V0 (25 March 2025) " + }, + { + "bbox": [ + 67, + 125, + 204, + 138 + ], + "type": "inline_equation", + "content": "\\rightarrow" + }, + { + "bbox": [ + 67, + 125, + 204, + 138 + ], + "type": "text", + "content": " V1" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 82, + 148, + 541, + 197 + ], + "type": "list", + "angle": 0, + "index": 5, + "blocks": [ + { + "bbox": [ + 82, + 148, + 541, + 171 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 82, + 148, + 541, + 171 + ], + "spans": [ + { + "bbox": [ + 82, + 148, + 541, + 171 + ], + "type": "text", + "content": "- Upgraded the Agentic-Tx system's orchestrator from Gemini 2.0 to Gemini 2.5. This enhancement results in significant performance improvements in complex workflow orchestration, as detailed in Table 3." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 82, + 172, + 541, + 197 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 82, + 172, + 541, + 197 + ], + "spans": [ + { + "bbox": [ + 82, + 172, + 541, + 197 + ], + "type": "text", + "content": "- Added performance results of TxGemma-Predict and TxGemma-Chat (trained only on commercially licensed datasets) for binary classification (Table S.17), regression, and generation tasks (Table S.18)." 
+ } + ] + } + ], + "index": 4 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 67, + 210, + 144, + 224 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 210, + 144, + 224 + ], + "spans": [ + { + "bbox": [ + 67, + 210, + 144, + 224 + ], + "type": "text", + "content": "A Summary" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 83, + 234, + 247, + 244 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 83, + 234, + 247, + 244 + ], + "spans": [ + { + "bbox": [ + 83, + 234, + 247, + 244 + ], + "type": "text", + "content": "Data details as listed in Section B:" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 102, + 249, + 541, + 358 + ], + "type": "list", + "angle": 0, + "index": 15, + "blocks": [ + { + "bbox": [ + 102, + 249, + 372, + 260 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 102, + 249, + 372, + 260 + ], + "spans": [ + { + "bbox": [ + 102, + 249, + 372, + 260 + ], + "type": "text", + "content": "- Table S.1: Excluded TDC tasks and reasons for exclusion." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 102, + 261, + 541, + 284 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 102, + 261, + 541, + 284 + ], + "spans": [ + { + "bbox": [ + 102, + 261, + 541, + 284 + ], + "type": "text", + "content": "- Table S.2: Number of samples in training, validation, and test sets for all binary classification tasks." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 102, + 285, + 541, + 308 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 102, + 285, + 541, + 308 + ], + "spans": [ + { + "bbox": [ + 102, + 285, + 541, + 308 + ], + "type": "text", + "content": "- Table S.3: Number of samples in training, validation, and test sets for all regression and generation tasks." 
+ } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 103, + 309, + 365, + 321 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 103, + 309, + 365, + 321 + ], + "spans": [ + { + "bbox": [ + 103, + 309, + 365, + 321 + ], + "type": "text", + "content": "- Table S.4: Descriptions of the binary classification tasks." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 103, + 322, + 389, + 334 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 103, + 322, + 389, + 334 + ], + "spans": [ + { + "bbox": [ + 103, + 322, + 389, + 334 + ], + "type": "text", + "content": "- Table S.5: Descriptions of the regression and generation tasks." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 102, + 334, + 502, + 346 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 102, + 334, + 502, + 346 + ], + "spans": [ + { + "bbox": [ + 102, + 334, + 502, + 346 + ], + "type": "text", + "content": "- Table S.6 Types of features in the processed TDC data along with illustrative examples." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 102, + 346, + 514, + 358 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 102, + 346, + 514, + 358 + ], + "spans": [ + { + "bbox": [ + 102, + 346, + 514, + 358 + ], + "type": "text", + "content": "Figure S.1: Distribution of TDC task sizes, aggregated over train, validation, and test sets." 
+ } + ] + } + ], + "index": 14 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 83, + 361, + 323, + 373 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 83, + 361, + 323, + 373 + ], + "spans": [ + { + "bbox": [ + 83, + 361, + 323, + 373 + ], + "type": "text", + "content": "Method and modeling details as listed in Section C:" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 103, + 377, + 541, + 486 + ], + "type": "list", + "angle": 0, + "index": 25, + "blocks": [ + { + "bbox": [ + 103, + 377, + 387, + 389 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 103, + 377, + 387, + 389 + ], + "spans": [ + { + "bbox": [ + 103, + 377, + 387, + 389 + ], + "type": "text", + "content": "- Table S.7 Examples of prompts for binary classification tasks." + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 103, + 389, + 411, + 401 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 103, + 389, + 411, + 401 + ], + "spans": [ + { + "bbox": [ + 103, + 389, + 411, + 401 + ], + "type": "text", + "content": "- Table S.8 Examples of prompts for regression and generation tasks." + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 103, + 402, + 426, + 413 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 103, + 402, + 426, + 413 + ], + "spans": [ + { + "bbox": [ + 103, + 402, + 426, + 413 + ], + "type": "text", + "content": "- Table S.9 Example of a 10-shot prompt for a binary classification task." + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 103, + 413, + 456, + 425 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 103, + 413, + 456, + 425 + ], + "spans": [ + { + "bbox": [ + 103, + 413, + 456, + 425 + ], + "type": "text", + "content": "- Table S.10 Example of prompts for predicting adverse events in clinical trials." 
+ } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 103, + 426, + 457, + 437 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 103, + 426, + 457, + 437 + ], + "spans": [ + { + "bbox": [ + 103, + 426, + 457, + 437 + ], + "type": "text", + "content": "- Table S.11 Example of Agentic-Tx response to a chemical preference question." + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 103, + 437, + 326, + 449 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 103, + 437, + 326, + 449 + ], + "spans": [ + { + "bbox": [ + 103, + 437, + 326, + 449 + ], + "type": "text", + "content": "- Table S.12 List of tools available to Agentic-Tx." + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 103, + 449, + 541, + 472 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 103, + 449, + 541, + 472 + ], + "spans": [ + { + "bbox": [ + 103, + 449, + 541, + 472 + ], + "type": "text", + "content": "- Figure S.2 Distribution of Tanimoto similarities for 10 nearest neighbors by dataset splits in the AMES task." + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 103, + 474, + 498, + 486 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 103, + 474, + 498, + 486 + ], + "spans": [ + { + "bbox": [ + 103, + 474, + 498, + 486 + ], + "type": "text", + "content": "- Section C.1 Details about Wilcoxon signed-rank test used to assess model performance." 
+ } + ] + } + ], + "index": 24 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 83, + 490, + 272, + 501 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 83, + 490, + 272, + 501 + ], + "spans": [ + { + "bbox": [ + 83, + 490, + 272, + 501 + ], + "type": "text", + "content": "Additional results as listed in Section D:" + } + ] + } + ], + "index": 26 + }, + { + "bbox": [ + 103, + 505, + 367, + 517 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 103, + 505, + 367, + 517 + ], + "spans": [ + { + "bbox": [ + 103, + 505, + 367, + 517 + ], + "type": "text", + "content": "- Additional prediction results for TxGemma (Section D.1)" + } + ] + } + ], + "index": 27 + }, + { + "bbox": [ + 122, + 518, + 541, + 723 + ], + "type": "list", + "angle": 0, + "index": 38, + "blocks": [ + { + "bbox": [ + 122, + 518, + 541, + 542 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 122, + 518, + 541, + 542 + ], + "spans": [ + { + "bbox": [ + 122, + 518, + 541, + 542 + ], + "type": "text", + "content": "* Table S.13 Performance on binary classification tasks for specialist SOTA, base Gemma-2, and TxGemma-Predict models." + } + ] + } + ], + "index": 28 + }, + { + "bbox": [ + 123, + 543, + 541, + 566 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 123, + 543, + 541, + 566 + ], + "spans": [ + { + "bbox": [ + 123, + 543, + 541, + 566 + ], + "type": "text", + "content": "* Table S.14 Performance on regression and generation tasks for specialist SOTA, base Gemma-2, and TxGemma-Predict models." + } + ] + } + ], + "index": 29 + }, + { + "bbox": [ + 123, + 567, + 541, + 590 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 123, + 567, + 541, + 590 + ], + "spans": [ + { + "bbox": [ + 123, + 567, + 541, + 590 + ], + "type": "text", + "content": "* Table S.15 Performance on binary classification tasks for TxGemma-Predict, TxGemma-Chat, and Tx-LLM models." 
+ } + ] + } + ], + "index": 30 + }, + { + "bbox": [ + 123, + 591, + 541, + 613 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 123, + 591, + 541, + 613 + ], + "spans": [ + { + "bbox": [ + 123, + 591, + 541, + 613 + ], + "type": "text", + "content": "* Table S.16 Performance on regression and generation tasks for TxGemma-Predict, TxGemma-Chat, and Tx-LLM models." + } + ] + } + ], + "index": 31 + }, + { + "bbox": [ + 123, + 615, + 541, + 638 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 123, + 615, + 541, + 638 + ], + "spans": [ + { + "bbox": [ + 123, + 615, + 541, + 638 + ], + "type": "text", + "content": "* Table S.17 Performance on binary classification tasks for TxGemma-Predict and TxGemma-Chat models trained only on datasets with commercial licenses." + } + ] + } + ], + "index": 32 + }, + { + "bbox": [ + 123, + 639, + 541, + 662 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 123, + 639, + 541, + 662 + ], + "spans": [ + { + "bbox": [ + 123, + 639, + 541, + 662 + ], + "type": "text", + "content": "* Table S.18 Performance on regression and generation tasks for TxGemma-Predict and TxGemma-Chat models trained only on datasets with commercial licenses." + } + ] + } + ], + "index": 33 + }, + { + "bbox": [ + 123, + 663, + 541, + 687 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 123, + 663, + 541, + 687 + ], + "spans": [ + { + "bbox": [ + 123, + 663, + 541, + 687 + ], + "type": "text", + "content": "* Figure S.4 Performance of TxGemma-27B-Predict compared to generalist and specialist SOTA models." + } + ] + } + ], + "index": 34 + }, + { + "bbox": [ + 122, + 687, + 541, + 699 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 122, + 687, + 541, + 699 + ], + "spans": [ + { + "bbox": [ + 122, + 687, + 541, + 699 + ], + "type": "text", + "content": "* Figure S.5 Comparison of TxGemma-27B-Predict with LlaSMol on select small molecule tasks." 
+ } + ] + } + ], + "index": 35 + }, + { + "bbox": [ + 123, + 700, + 537, + 712 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 123, + 700, + 537, + 712 + ], + "spans": [ + { + "bbox": [ + 123, + 700, + 537, + 712 + ], + "type": "text", + "content": "* Figure S.6 Comparison of TxGemma-27B-Predict with MolE on select small molecule tasks." + } + ] + } + ], + "index": 36 + }, + { + "bbox": [ + 123, + 712, + 421, + 723 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 123, + 712, + 421, + 723 + ], + "spans": [ + { + "bbox": [ + 123, + 712, + 421, + 723 + ], + "type": "text", + "content": "* Figure S.11 Inference speed of TxGemma models at various sizes." + } + ] + } + ], + "index": 37 + } + ], + "sub_type": "text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 528, + 742, + 541, + 752 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 528, + 742, + 541, + 752 + ], + "spans": [ + { + "bbox": [ + 528, + 742, + 541, + 752 + ], + "type": "text", + "content": "23" + } + ] + } + ], + "index": 39 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 22 + }, + { + "para_blocks": [ + { + "bbox": [ + 123, + 72, + 541, + 167 + ], + "type": "list", + "angle": 0, + "index": 5, + "blocks": [ + { + "bbox": [ + 123, + 72, + 474, + 83 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 123, + 72, + 474, + 83 + ], + "spans": [ + { + "bbox": [ + 123, + 72, + 474, + 83 + ], + "type": "text", + "content": "* Figure S.12 Percent contamination for datasets and cosine similarity analysis." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 123, + 83, + 541, + 106 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 123, + 83, + 541, + 106 + ], + "spans": [ + { + "bbox": [ + 123, + 83, + 541, + 106 + ], + "type": "text", + "content": "* Figure S.13 Performance on contaminated datasets before and after filtering out contaminated datapoints." 
+ } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 123, + 107, + 448, + 118 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 123, + 107, + 448, + 118 + ], + "spans": [ + { + "bbox": [ + 123, + 107, + 448, + 118 + ], + "type": "text", + "content": "* Figure S.16 Performance by feature type of all TxGemma-Predict sizes." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 123, + 119, + 541, + 143 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 123, + 119, + 541, + 143 + ], + "spans": [ + { + "bbox": [ + 123, + 119, + 541, + 143 + ], + "type": "text", + "content": "* Figure S.17 Comparison of TxGemma-Predict performances over different sizes and with Gemma-2 models." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 123, + 144, + 541, + 167 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 123, + 144, + 541, + 167 + ], + "spans": [ + { + "bbox": [ + 123, + 144, + 541, + 167 + ], + "type": "text", + "content": "* Figure S.18 Correlations of TxGemma-27B-Predict predictions for toxicity and clinical trial approval tasks." + } + ] + } + ], + "index": 4 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 104, + 170, + 467, + 181 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 170, + 467, + 181 + ], + "spans": [ + { + "bbox": [ + 104, + 170, + 467, + 181 + ], + "type": "text", + "content": "- Conversing with TxGemma-27B-Predict and TxGemma-27B-Chat (Section D.2)" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 123, + 183, + 541, + 255 + ], + "type": "list", + "angle": 0, + "index": 11, + "blocks": [ + { + "bbox": [ + 123, + 183, + 541, + 206 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 123, + 183, + 541, + 206 + ], + "spans": [ + { + "bbox": [ + 123, + 183, + 541, + 206 + ], + "type": "text", + "content": "* Figure S.7 Comparison of TxGemma-27B-Predict, TxGemma-27B-Chat, and Gemma-2-27B on MMLU." 
+ } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 123, + 208, + 502, + 219 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 123, + 208, + 502, + 219 + ], + "spans": [ + { + "bbox": [ + 123, + 208, + 502, + 219 + ], + "type": "text", + "content": "* Figure S.8 Example of a dialogue with TxGemma-27B-Predict about general topics." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 123, + 220, + 541, + 232 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 123, + 220, + 541, + 232 + ], + "spans": [ + { + "bbox": [ + 123, + 220, + 541, + 232 + ], + "type": "text", + "content": "* Figure S.9 Example of a multi-turn dialogue with TxGemma-27B-Predict about its predictions." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 123, + 232, + 541, + 255 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 123, + 232, + 541, + 255 + ], + "spans": [ + { + "bbox": [ + 123, + 232, + 541, + 255 + ], + "type": "text", + "content": "* Figure S.10 Example of a prompt format the enables TxGemma-Chat to provide reasoning for challenging tasks." + } + ] + } + ], + "index": 10 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 104, + 258, + 310, + 270 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 258, + 310, + 270 + ], + "spans": [ + { + "bbox": [ + 104, + 258, + 310, + 270 + ], + "type": "text", + "content": "- Additional Agentic-Tx Results (Section D.3)" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 123, + 272, + 529, + 296 + ], + "type": "list", + "angle": 0, + "index": 15, + "blocks": [ + { + "bbox": [ + 123, + 272, + 529, + 283 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 123, + 272, + 529, + 283 + ], + "spans": [ + { + "bbox": [ + 123, + 272, + 529, + 283 + ], + "type": "text", + "content": "* Figure S.14 Agentic-Tx tool use frequencies for chemical preference and HLE benchmarks." 
+ } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 123, + 284, + 526, + 296 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 123, + 284, + 526, + 296 + ], + "spans": [ + { + "bbox": [ + 123, + 284, + 526, + 296 + ], + "type": "text", + "content": "* Figure S.15 Agentic-Tx tool use frequency per question for chemical preference questions." + } + ] + } + ], + "index": 14 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 104, + 298, + 364, + 310 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 298, + 364, + 310 + ], + "spans": [ + { + "bbox": [ + 104, + 298, + 364, + 310 + ], + "type": "text", + "content": "- Proof-of-concept example using TxGemma (Section D.4)" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 123, + 312, + 541, + 336 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 123, + 312, + 541, + 336 + ], + "spans": [ + { + "bbox": [ + 123, + 312, + 541, + 336 + ], + "type": "text", + "content": "* Figure S.3 Illustration of a possible application of TxGemma to end-to-end therapeutic development." 
+ } + ] + } + ], + "index": 17 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 528, + 742, + 541, + 751 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 528, + 742, + 541, + 751 + ], + "spans": [ + { + "bbox": [ + 528, + 742, + 541, + 751 + ], + "type": "text", + "content": "24" + } + ] + } + ], + "index": 18 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 23 + }, + { + "para_blocks": [ + { + "bbox": [ + 67, + 72, + 160, + 83 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 72, + 160, + 83 + ], + "spans": [ + { + "bbox": [ + 67, + 72, + 160, + 83 + ], + "type": "text", + "content": "B Data details" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 67, + 95, + 541, + 120 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 95, + 541, + 120 + ], + "spans": [ + { + "bbox": [ + 67, + 95, + 541, + 120 + ], + "type": "text", + "content": "This section provides a breakdown of the tasks used in our study, including information on excluded tasks and the size of training, validation, and test sets for binary classification, regression, and generation tasks." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 67, + 123, + 542, + 256 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 123, + 542, + 256 + ], + "spans": [ + { + "bbox": [ + 67, + 123, + 542, + 256 + ], + "type": "text", + "content": "As previously mentioned, we excluded a small number of tasks from TDC for various reasons. Table S.1 provides an overview of the excluded tasks and the rationale behind their exclusion. The primary reasons for exclusion were the tasks' relevance to the study, limitations of LLMs, and specific data characteristics, such as the absence of clear metrics or redundancy. For instance, tasks like QM7b, QM8, and QM9, which focus on predicting quantum properties, were not directly relevant to the study's focus on therapeutic development. 
Similarly, IEDB Jespersen and PDB Jespersen were excluded due to their small size and the complexity of implementing token prediction, as opposed to binary classification, within an LLM framework. Tasks such as DrugBank DDI, TWOSIDES, and USPTO Catalyst posed challenges due to the large number of potential labels, making them difficult for LLMs to process effectively. MOSES, ZINC, and ChEMBL were excluded because they lacked well-defined evaluation metrics. Finally, USPTO 50K and USPTO Reaction were excluded as they either overlapped with or were subsets of the USPTO task." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 67, + 260, + 541, + 345 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 260, + 541, + 345 + ], + "spans": [ + { + "bbox": [ + 67, + 260, + 541, + 345 + ], + "type": "text", + "content": "Tables S.2 and S.3 specify the number of samples in the training, validation, and test sets for the included binary classification, regression, and generation tasks, respectively. Substantial variability in task sizes across different tasks is shown in these tables. The binary classification tasks range from 196 to 1,406,988 samples, while the regression and generation tasks range from 345 to 775,767 samples. This variability highlights the diverse data availability landscape across various tasks. Figure S.1 provides a visual representation of the distribution of TDC task sizes, aggregated across train, validation, and test sets. For tasks encompassing multiple subtasks, like ToxCast, the task size is computed by summing the sizes of each individual dataset." 
+ } + ] + } + ], + "index": 3 + }, + { + "type": "image", + "bbox": [ + 203, + 358, + 409, + 491 + ], + "blocks": [ + { + "bbox": [ + 203, + 358, + 409, + 491 + ], + "lines": [ + { + "bbox": [ + 203, + 358, + 409, + 491 + ], + "spans": [ + { + "bbox": [ + 203, + 358, + 409, + 491 + ], + "type": "image", + "image_path": "3765c58d0679b30005a3a6896f3f4d67936c40efceb092bbd2821ab8663df4b7.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 67, + 502, + 542, + 539 + ], + "lines": [ + { + "bbox": [ + 67, + 502, + 542, + 539 + ], + "spans": [ + { + "bbox": [ + 67, + 502, + 542, + 539 + ], + "type": "text", + "content": "Figure S.1 | Distribution of TDC task sizes, aggregated over train, validation, and test sets. For tasks containing multiple datasets, such as ToxCast which contains data for more than 600 different assays, the task size is calculated by summing over the sizes for each dataset." + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_caption" + } + ], + "index": 4 + }, + { + "bbox": [ + 67, + 557, + 542, + 691 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 557, + 542, + 691 + ], + "spans": [ + { + "bbox": [ + 67, + 557, + 542, + 691 + ], + "type": "text", + "content": "Tables S.4 and S.5 provide a brief description of the tasks, as well as the types of inputs (e.g. protein, small molecules, etc.). These tasks are diverse and encompass many different aspects of development. Some tasks corresponding to gene-disease association or protein-protein interaction prediction are useful for early-stage development, in order to identify mechanisms of disease and relevant targets. Predictions of antibody affinity, drug-target interaction, high-throughput screening, drug synergy are useful for intermediate development steps that involve proposing candidate therapeutics based on their interaction with a target. 
Predictions of toxicity, pharmacokinetics, and developability are useful for filtering candidates down based on favorable druglike properties. Predictions of clinical trial outcome, reaction yields, retrosynthesis are useful for late-stage development where understanding the likelihood of clinical trial approval and manufacturing potential are critical. There are also tasks that are highly specific for particular therapeutics types, which include predictions of CRISPR repair, peptide-MHC binding, miRNA-Target interaction, and TCR-epitope binding." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 67, + 694, + 541, + 719 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 694, + 541, + 719 + ], + "spans": [ + { + "bbox": [ + 67, + 694, + 541, + 719 + ], + "type": "text", + "content": "Binary classification tasks always output “(A)” or “(B)”, where “(A)” is a negative answer to the question which is specified in the prompt and “(B)” is a positive answer. Regression tasks output an integer between" + } + ] + } + ], + "index": 7 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 528, + 742, + 541, + 751 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 528, + 742, + 541, + 751 + ], + "spans": [ + { + "bbox": [ + 528, + 742, + 541, + 751 + ], + "type": "text", + "content": "25" + } + ] + } + ], + "index": 8 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 24 + }, + { + "para_blocks": [ + { + "bbox": [ + 67, + 72, + 543, + 109 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 72, + 543, + 109 + ], + "spans": [ + { + "bbox": [ + 67, + 72, + 543, + 109 + ], + "type": "text", + "content": "0 and 1000, which can be transformed back into the original task-specific label space. The output of the USPTO generation task is the SMILES string of the predicted molecules. Table S.6 lists the different types of inputs in the processed TDC data along with illustrative examples." 
+ } + ] + } + ], + "index": 0 + }, + { + "type": "table", + "bbox": [ + 72, + 164, + 538, + 388 + ], + "blocks": [ + { + "bbox": [ + 67, + 124, + 542, + 160 + ], + "lines": [ + { + "bbox": [ + 67, + 124, + 542, + 160 + ], + "spans": [ + { + "bbox": [ + 67, + 124, + 542, + 160 + ], + "type": "text", + "content": "Table S.1 | Excluded TDC tasks and reasons for exclusion. The tasks were excluded primarily due to their relevance to the study, limitations inherent to large language models (LLMs), and specific data characteristics, such as a lack of clear evaluation metrics or redundancy." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 72, + 164, + 538, + 388 + ], + "lines": [ + { + "bbox": [ + 72, + 164, + 538, + 388 + ], + "spans": [ + { + "bbox": [ + 72, + 164, + 538, + 388 + ], + "type": "table", + "html": "
Task NameReason for Exclusion
QM7bPrediction of quantum properties is not closely related to therapeutic development.
QM8Prediction of quantum properties is not closely related to therapeutic development.
QM9Prediction of quantum properties is not closely related to therapeutic development.
IEDB JespersenAmount of data is small, and token prediction is more difficult to implement in a LLM than binary classification.
PDB JespersenAmount of data is small, and token prediction is more difficult to implement in a LLM than binary classification.
DrugBank DDILarge number of possible labels is difficult to implement in a LLM.
TWOSIDESLarge number of possible labels is difficult to implement in a LLM.
USPTO CatalystLarge number of possible labels is difficult to implement in a LLM.
MOSESNo clear metric.
ZINCNo clear metric.
ChEMBLNo clear metric.
USPTO 50KSubset of USPTO.
USPTO ReactionSame data as USPTO.
", + "image_path": "4d2824d594ed2d7abd228cce1d0df9ff221c1c6c2479fa2ed84c0df88c6e7cac.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "table_body" + } + ], + "index": 2 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 528, + 742, + 541, + 752 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 528, + 742, + 541, + 752 + ], + "spans": [ + { + "bbox": [ + 528, + 742, + 541, + 752 + ], + "type": "text", + "content": "26" + } + ] + } + ], + "index": 3 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 25 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 46, + 190, + 566, + 651 + ], + "blocks": [ + { + "bbox": [ + 67, + 137, + 542, + 185 + ], + "lines": [ + { + "bbox": [ + 67, + 137, + 542, + 185 + ], + "spans": [ + { + "bbox": [ + 67, + 137, + 542, + 185 + ], + "type": "text", + "content": "Table S.2 | Number of samples in training, validation, and test sets for all binary classification tasks. The binary classification tasks range in size from a minimum of 196 samples (Carcinogens Lagunin) to a maximum of 1,406,988 samples (butkiewicz), highlighting the considerable variability in data availability across different tasks. The task type and split type are also indicated following the TDC classification and recommendation." + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 46, + 190, + 566, + 651 + ], + "lines": [ + { + "bbox": [ + 46, + 190, + 566, + 651 + ], + "spans": [ + { + "bbox": [ + 46, + 190, + 566, + 651 + ], + "type": "table", + "html": "
Task NameTask TypeSplit TypeTraining SizeValidation SizeTest Size
AMESToxicityScaffold5,0937281,457
BBB MartinsPharmacokineticsScaffold1,421203406
Bioavailability MaPharmacokineticsScaffold1,344192384
CYP1A2 VeithPharmacokineticsScaffold8,8051,2572,517
CYP2C19 VeithPharmacokineticsScaffold8,8651,2662,534
CYP2C9 Substrate CarbonMangelsPharmacokineticsScaffold46767135
CYP2C9 VeithPharmacokineticsScaffold8,4631,2102,419
CYP2D6 Substrate CarbonMangelsPharmacokineticsScaffold46567135
CYP2D6 VeithPharmacokineticsScaffold9,1911,3132,626
CYP3A4 Substrate CarbonMangelsPharmacokineticsScaffold46867135
CYP3A4 VeithPharmacokineticsScaffold8,6281,2332,467
Carcinogens LaguninToxicityScaffold1962856
ClinToxToxicityScaffold1,034147297
DILIToxicityScaffold3255496
HIA HouPharmacokineticsScaffold40358117
HIV*High-throughput screeningScaffold28,7884,1128,227
HuRIProtein-protein interactionCold-start45,8559873,694
MHC1 IEDB IMGT NielsenPeptide-MHC bindingRandom130,19018,59837,197
MHC2 IEDB JensenPeptide-MHC bindingRandom93,99713,42826,856
PAMPA NCATSPharmacokineticsScaffold1,423203408
Pgp BrocatelliPharmacokineticsScaffold851122245
SARSCOV2 3CLPro DiamondHigh-throughput screeningScaffold61688176
SARSCoV2 Vitro TouretHigh-throughput screeningScaffold1,038148298
SAbDab ChenDevelopabilityRandom1,686241482
Skin ReactionToxicityScaffold2824082
Tox21ToxicityScaffold54,5567,79015,600
ToxCastToxicityScaffold1,073,279153,099307,282
butkiewiczHigh-throughput screeningRandom1,406,988200,99840,1997
hERGToxicityScaffold45766132
hERG KarimToxicityScaffold9,4111,3442,690
herg centralToxicityScaffold214,82530,68961,379
miRTarBasemiRNA-target interactionRandom559,59179,948159,889
phase1Clinical trial outcomeCold-start1,546258598
phase2Clinical trial outcomeCold-start5,7927161,282
phase3Clinical trial outcomeCold-start41,255321,084
weberTCR-epitope bindingCold-start33,0134,7489,421
", + "image_path": "b32c8ecb923b6ef6b0bb7d90b88ccffdb27bb8885edf80d5efadf8fd7a85e95f.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "table_body" + }, + { + "bbox": [ + 67, + 651, + 279, + 661 + ], + "lines": [ + { + "bbox": [ + 67, + 651, + 279, + 661 + ], + "spans": [ + { + "bbox": [ + 67, + 651, + 279, + 661 + ], + "type": "text", + "content": "* To predict whether compounds have Anti-HIV properties." + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "table_footnote" + } + ], + "index": 1 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 528, + 742, + 541, + 751 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 528, + 742, + 541, + 751 + ], + "spans": [ + { + "bbox": [ + 528, + 742, + 541, + 751 + ], + "type": "text", + "content": "27" + } + ] + } + ], + "index": 3 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 26 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 66, + 213, + 545, + 638 + ], + "blocks": [ + { + "bbox": [ + 67, + 160, + 542, + 206 + ], + "lines": [ + { + "bbox": [ + 67, + 160, + 542, + 206 + ], + "spans": [ + { + "bbox": [ + 67, + 160, + 542, + 206 + ], + "type": "text", + "content": "Table S.3 | Number of samples in training, validation, and test sets for all regression and generation tasks. The regression and generation tasks vary significantly in size, ranging from a minimum of 345 samples (Protein SAbDab) to a maximum of 775,767 samples (USPTO). The task type and split type are also indicated following the TDC classification and recommendation." + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 66, + 213, + 545, + 638 + ], + "lines": [ + { + "bbox": [ + 66, + 213, + 545, + 638 + ], + "spans": [ + { + "bbox": [ + 66, + 213, + 545, + 638 + ], + "type": "table", + "html": "
Task NameTask TypeSplit TypeTraining SizeValidation SizeTest Size
BindingDB PatentDrug-target interactionTemporal146,80036,63049,028
BindingDB ic50Drug-target interactionCold-start375,1277,53131,495
BindingDB kdDrug-target interactionCold-start19,0343762,321
BindingDB kiDrug-target interactionCold-start57,6561,1894,709
Buchwald HartwigReaction yieldsRandom2,768396791
Caco2 WangPharmacokineticsScaffold63791182
Clearance Hepatocyte AZPharmacokineticsScaffold848122243
Clearance Microsome AZPharmacokineticsScaffold770111221
DAVISDrug-target interactionCold-start12,4552661,064
DisGeNETGene-disease associationRandom39,4255,62111,200
DrugComb BlissDrug synergyCombination207,77229,61859,708
DrugComb CSSDrug synergyCombination207,77229,61859,708
DrugComb HSADrug synergyCombination207,77229,61859,708
DrugComb LoeweDrug synergyCombination207,77229,61859,708
DrugComb ZIPDrug synergyCombination207,77229,61859,708
GDSC1Drug responseRandom124,11717,73135,462
GDSC2Drug responseRandom64,8929,27018,541
Half Life ObachPharmacokineticsScaffold46567135
KIBADrug-target interactionCold-start59,3261,0424,524
LD50 ZhuToxicityScaffold5,1687391,478
LeenayCRISPR repairRandom5,3257601,520
Lipophilicity AstraZenecaPharmacokineticsScaffold2,940420840
OncoPolyPharmacologyDrug synergyCombination16,0142,3314,707
PPBR AZPharmacokineticsScaffold1,952279559
Protein SAbDabAntibody affinityRandom3454999
Solubility AqSolDBPharmacokineticsScaffold6,9889981,996
TAPDevelopabilityRandom845120240
USPTORetrosynthesisRandom775,767110,824221,648
USPTO YieldsReaction yieldsRandom597,54685,364170,728
VDss LombardoPharmacokineticsScaffold791113226
", + "image_path": "0bb72c425ec48ac5375fe73446d4fae9bb535296e49f9ec67b1508fd86755108.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "table_body" + } + ], + "index": 1 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 528, + 742, + 541, + 751 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 528, + 742, + 541, + 751 + ], + "spans": [ + { + "bbox": [ + 528, + 742, + 541, + 751 + ], + "type": "text", + "content": "28" + } + ] + } + ], + "index": 2 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 27 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 53, + 142, + 558, + 672 + ], + "blocks": [ + { + "bbox": [ + 67, + 114, + 542, + 138 + ], + "lines": [ + { + "bbox": [ + 67, + 114, + 542, + 138 + ], + "spans": [ + { + "bbox": [ + 67, + 114, + 542, + 138 + ], + "type": "text", + "content": "Table S.4 | Inputs and task descriptions for binary classification tasks. All output responses are either (A) for negative or (B) for positive." + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 53, + 142, + 558, + 672 + ], + "lines": [ + { + "bbox": [ + 53, + 142, + 558, + 672 + ], + "spans": [ + { + "bbox": [ + 53, + 142, + 558, + 672 + ], + "type": "table", + "html": "
Task NameInputDescription
AMESSmall moleculeGiven a drug SMILES, predict whether it is mutagenic.
BBB MartinsSmall moleculeGiven a drug SMILES, predict whether it can cross the blood-brain barrier.
Bioavailability MaSmall moleculeGiven a drug SMILES, predict whether it is orally available.
CYP1A2 VeithSmall moleculeGiven a drug SMILES, predict whether it inhibits CYP1A2.
CYP2C19 VeithSmall moleculeGiven a drug SMILES, predict whether it inhibits CYP2C19.
CYP2C9 Substrate CarbonMangelsSmall moleculeGiven a drug SMILES, predict whether it is a substrate to CYP2C9.
CYP2C9 VeithSmall moleculeGiven a drug SMILES, predict whether it inhibits CYP2C9.
CYP2D6 Substrate CarbonMangelsSmall moleculeGiven a drug SMILES, predict whether it is a substrate to CYP2D6.
CYP2D6 VeithSmall moleculeGiven a drug SMILES, predict whether it inhibits CYP2D6.
CYP3A4 Substrate CarbonMangelsSmall moleculeGiven a drug SMILES, predict whether it is a substrate to CYP3A4.
CYP3A4 VeithSmall moleculeGiven a drug SMILES, predict whether it inhibits CYP3A4.
Carcinogens LaguninSmall moleculeGiven a drug SMILES, predict whether it is a carcinogen.
ClinToxSmall moleculeGiven a drug SMILES, predict whether it is toxic.
DILISmall moleculeGiven a drug SMILES, predict whether it can cause liver injury.
HIA HouSmall moleculeGiven a drug SMILES, predict whether it is absorbed in the human intestine.
HIV*Small moleculeGiven a drug SMILES, predict whether it has anti-HIV activity.
HuRIProteinGiven the amino acid sequences of two proteins, predict whether the proteins interact.
MHC1 IEDB IMGT NielsenProteinGiven the amino acid of the peptide and pseudo amino acid of MHC 1, predict whether the peptide binds to the MHC.
MHC2 IEDB JensenProteinGiven the amino acid of the peptide and pseudo amino acid of MHC 2, predict whether the peptide binds to the MHC.
PAMPA NCATSSmall moleculeGiven a drug SMILES, predict whether it is permeable in a PAMPA assay.
Pgp BroccatelliSmall moleculeGiven a drug SMILES, predict whether it inhibits Pgp.
SARSCOV2 3CLPro DiamondSmall moleculeGiven a drug SMILES, predict whether it binds SARS-CoV-2 3CL protease.
SARSCOV2 Vitro TouretSmall moleculeGiven a drug SMILES, predict whether it inhibits SARS-CoV-2 replication.
SAbDab ChenProteinGiven an antibody heavy chain and light chain sequence, whether it is developable.
Skin ReactionSmall moleculeGiven a drug SMILES, predict whether it can cause skin reaction.
Tox21Small moleculeGiven a drug SMILES, predict whether it is toxic in various assays.
ToxCastSmall moleculeGiven a drug SMILES, predict whether it is toxic in various assays.
butkiewiczSmall moleculeGiven a drug SMILES, predict whether it is active against various proteins.
hERGSmall moleculeGiven a drug SMILES, predict whether it blocks hERG.
hERG KarimSmall moleculeGiven a drug SMILES, predict whether it inhibits hERG.
herg centralSmall moleculeGiven a drug SMILES, predict whether it inhibits hERG.
miRTarBase phase1Nucleic acid & proteinGiven the miRNA mature and target amino acid, predict whether they interact.
phase2Small molecule & diseaseGiven a drug SMILES and disease, predict whether the phase 1 trial will be approved.
phase3Small molecule & diseaseGiven a drug SMILES and disease, predict whether the phase 2 trial will be approved.
weberSmall molecule & diseaseGiven a drug SMILES and disease, predict whether the phase 3 trial will be approved.
ProteinGiven the amino acid of the epitope and a T-cell receptor (amino acid of the hypervariable CDR3 loop), predict whether the epitope binds to the TCR.
", + "image_path": "9fb69e97ac4ee81f26e53d78fe24373e98bbbb61c0bd4b22e0baf2713a888d55.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "table_body" + }, + { + "bbox": [ + 68, + 674, + 280, + 684 + ], + "lines": [ + { + "bbox": [ + 68, + 674, + 280, + 684 + ], + "spans": [ + { + "bbox": [ + 68, + 674, + 280, + 684 + ], + "type": "text", + "content": "* To predict whether compounds have Anti-HIV properties." + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "table_footnote" + } + ], + "index": 1 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 528, + 742, + 541, + 751 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 528, + 742, + 541, + 751 + ], + "spans": [ + { + "bbox": [ + 528, + 742, + 541, + 751 + ], + "type": "text", + "content": "29" + } + ] + } + ], + "index": 3 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 28 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 47, + 187, + 564, + 665 + ], + "blocks": [ + { + "bbox": [ + 67, + 135, + 542, + 182 + ], + "lines": [ + { + "bbox": [ + 67, + 135, + 542, + 182 + ], + "spans": [ + { + "bbox": [ + 67, + 135, + 542, + 182 + ], + "type": "text", + "content": "Table S.5 | Inputs and task descriptions for regression and generation tasks. Regression task outputs are integers between 0 and 1000, which represents a binned transformation of the original numeric label. On evaluation, the integer output is transformed back into the original numeric label space. For the USPTO generation task, the output is the SMILES string of the predicted set of small molecules." + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 47, + 187, + 564, + 665 + ], + "lines": [ + { + "bbox": [ + 47, + 187, + 564, + 665 + ], + "spans": [ + { + "bbox": [ + 47, + 187, + 564, + 665 + ], + "type": "table", + "html": "
Task NameInputDescription
BindingDB PatentProtein & small moleculeGiven the target amino acid and drug SMILES, predict their binding affinity.
BindingDB ic50ProteinGiven the target amino acid and drug SMILES, predict their IC50.
BindingDB kdProteinGiven the target amino acid and drug SMILES, predict their Kd.
BindingDB kiProteinGiven the target amino acid and drug SMILES, predict their Ki.
Buchwald HartwigSmall moleculeGiven a product, a catalyst, and a reactant SMILES, predict the reaction yield.
Caco2 WangSmall moleculeGiven a drug SMILES, predict the cell effective permeability.
Clearance Hepatocyte AZSmall moleculeGiven a drug SMILES, predict the activity of hepatocyte clearance.
Clearance Microsome AZSmall moleculeGiven a drug SMILES, predict the activity of microsome clearance.
DAVISProtein & small moleculeGiven the target amino acid and drug SMILES, predict their binding affinity.
DisGeNETProtein & diseaseGiven the disease description and the amino acid of the gene, predict their association.
DrugComb BlissSmall molecule & cell lineGiven two drug SMILESs and a cell line description, predict the drug synergy level.
DrugComb CSSSmall molecule & cell lineGiven two drug SMILESs and a cell line description, predict the drug synergy level.
DrugComb HSASmall molecule & cell lineGiven two drug SMILESs and a cell line description, predict the drug synergy level.
DrugComb LoeweSmall molecule & cell lineGiven two drug SMILESs and a cell line description, predict the drug synergy level.
DrugComb ZIPSmall molecule & cell lineGiven two drug SMILESs and a cell line description, predict the drug synergy level.
GDSC1Small molecule & cell lineGiven a drug SMILES and a cell line description, predict the drug sensitivity level.
GDSC2Small molecule & cell lineGiven a drug SMILES and a cell line description, predict the drug sensitivity level.
Half Life ObachSmall moleculeGiven a drug SMILES, predict the half life duration.
KIBAProtein & small moleculeGiven the target amino acid and drug SMILES, predict their binding affinity.
LD50 ZhuSmall moleculeGiven a drug SMILES, predict its LD50 toxicity.
LeenayNucleic acidGiven a GuideSeq sequence, predict various properties.
Lipophilicity AstraZenecaSmall moleculeGiven a drug SMILES, predict the lipophilicity.
OncoPolyPharmacologyCell line & small moleculeGiven two drug SMILESs and a cell line description, predict the drug synergy level.
PPBR AZSmall moleculeGiven a drug SMILES, predict the plasma protein binding rate.
Protein SAbDabProteinGiven the amino acid of the antibody and antigen, predict the binding affinity.
Solubility AqSolDBSmall moleculeGiven a drug SMILES, predict the activity of solubility.
TAPProteinGiven an antibody heavy chain and light chain sequence, predict its CDR length.
USPTOSmall moleculeGiven the product SMILES, generate the reactant SMILESs.
USPTO YieldsSmall moleculeGiven a catalyst SMILES, reactant SMILES, and product SMILES, predict the yield.
VDss LombardoSmall moleculeGiven a drug SMILES, predict the volume of distributon.
", + "image_path": "6c106798dfbe627a04549474626d78648a113dce271a2871c66142c409a94aba.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "table_body" + } + ], + "index": 1 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 528, + 742, + 541, + 751 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 528, + 742, + 541, + 751 + ], + "spans": [ + { + "bbox": [ + 528, + 742, + 541, + 751 + ], + "type": "text", + "content": "30" + } + ] + } + ], + "index": 2 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 29 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 72, + 369, + 538, + 467 + ], + "blocks": [ + { + "bbox": [ + 67, + 316, + 542, + 366 + ], + "lines": [ + { + "bbox": [ + 67, + 316, + 542, + 366 + ], + "spans": [ + { + "bbox": [ + 67, + 316, + 542, + 366 + ], + "type": "text", + "content": "Table S.6 | Types of drugs and targets found in our data. Features found in our data as well as their textual representation and an illustrative example. Protein sequences are divided into several subtypes: some proteins and peptides are represented using their full amino acid sequence whereas MHC molecules are represented using the amino acid pseudo-sequences that only use residues in contact with a peptide, and TCRs only use CDR3 hypervariable loops." + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 72, + 369, + 538, + 467 + ], + "lines": [ + { + "bbox": [ + 72, + 369, + 538, + 467 + ], + "spans": [ + { + "bbox": [ + 72, + 369, + 538, + 467 + ], + "type": "table", + "html": "
Representation TypeRepresentationExample
Small MoleculesSMILES stringCN1C(=O)CN=C(C2=CCCCC2)c2cc(Cl)ccc21
Amino Acid: Proteins and peptidesAmino acid sequencesQLADETLLKV
Amino Acid: MHC moleculesPseudo-sequences †YFAMYGEKVAHTHVDTLYVRYHYYTWAEWAYTWY
Amino Acid: T cell receptorsCDR3 hypervariable loopsCSASEGTSSYEQYF
Nucleic acidNucleotide sequenceACAGCCCAGCAGUUUAUCACGGG
DiseaseEnglish textChronic myeloproliferative disease
Cell LineEnglish textNU-1, stomach cell sourced from cancer
", + "image_path": "d7dcc741662b146f89dbd3da83f6492c95c1c43ce8fb40ae16b5fbee3c7eaae9.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "table_body" + }, + { + "bbox": [ + 68, + 472, + 230, + 481 + ], + "lines": [ + { + "bbox": [ + 68, + 472, + 230, + 481 + ], + "spans": [ + { + "bbox": [ + 68, + 472, + 230, + 481 + ], + "type": "text", + "content": "† Only for residues in contact with a peptide." + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "table_footnote" + } + ], + "index": 1 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 528, + 742, + 540, + 751 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 528, + 742, + 540, + 751 + ], + "spans": [ + { + "bbox": [ + 528, + 742, + 540, + 751 + ], + "type": "text", + "content": "31" + } + ] + } + ], + "index": 3 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 30 + }, + { + "para_blocks": [ + { + "bbox": [ + 67, + 71, + 177, + 83 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 71, + 177, + 83 + ], + "spans": [ + { + "bbox": [ + 67, + 71, + 177, + 83 + ], + "type": "text", + "content": "C Method details" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 66, + 95, + 544, + 240 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 66, + 95, + 544, + 240 + ], + "spans": [ + { + "bbox": [ + 66, + 95, + 544, + 240 + ], + "type": "text", + "content": "This section elaborates on the modeling choices employed in the development of TxGemma. Tables S.7 and S.8 illustrate prompts used for binary classification, regression, and generation tasks, showcasing the input structure for the model including the instructions and context provided to the model. Table S.9 provide a concrete example of few-shot prompting applied to a binary classification task using 10 examples with nearest-neighbor shots. Each dataset in our data is structured as a text prompt, consisting of instructions, context, a question, and the corresponding answer. 
To provide relevant background, we created 2-3 sentence contexts based on TDC dataset descriptions and literature searches. Prompts used for predicting adverse events in clinical trials based on the TrialBench dataset [1] are shown in Table S.10. To illustrate the reasoning process of Agentic-Tx, Table S.11 provides an example of the steps taken to answer a chemical preference question from ChemBench. Table S.12 also provides a comprehensive list of the tools available of Agentic-Tx. Section C.1 provides details of the Wilcoxon signed-rank test used to assess the performance of our models across all tasks." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 66, + 244, + 543, + 293 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 66, + 244, + 543, + 293 + ], + "spans": [ + { + "bbox": [ + 66, + 244, + 543, + 293 + ], + "type": "text", + "content": "We utilize random data points from the training set for few-shot learning during training. Although we use nearest neighbor shots for evaluation, we opt for random shots during training due to the higher intra-set similarity observed within the training data compared to between training and test sets, as illustrated in Figure S.2." + } + ] + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 190, + 306, + 421, + 452 + ], + "blocks": [ + { + "bbox": [ + 190, + 306, + 421, + 452 + ], + "lines": [ + { + "bbox": [ + 190, + 306, + 421, + 452 + ], + "spans": [ + { + "bbox": [ + 190, + 306, + 421, + 452 + ], + "type": "image", + "image_path": "a97b57e1e6b69f48df9b46566fd0c5232a634bcaa152ee1d7db532c7273836dd.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 66, + 463, + 545, + 499 + ], + "lines": [ + { + "bbox": [ + 66, + 463, + 545, + 499 + ], + "spans": [ + { + "bbox": [ + 66, + 463, + 545, + 499 + ], + "type": "text", + "content": "Figure S.2 | Distribution of the Tanimoto similarities for the 10 nearest neighbors in the AMES task. 
Nearest neighbors are calculated from the training set for training and validation sets, and from both the training and validation sets for the test set." + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_caption" + } + ], + "index": 3 + }, + { + "bbox": [ + 67, + 523, + 269, + 537 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 523, + 269, + 537 + ], + "spans": [ + { + "bbox": [ + 67, + 523, + 269, + 537 + ], + "type": "text", + "content": "C.1 Aggregated method comparison" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 66, + 544, + 543, + 570 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 66, + 544, + 543, + 570 + ], + "spans": [ + { + "bbox": [ + 66, + 544, + 543, + 570 + ], + "type": "text", + "content": "For a pair of performances " + }, + { + "bbox": [ + 66, + 544, + 543, + 570 + ], + "type": "inline_equation", + "content": "(x_{i},y_{i})" + }, + { + "bbox": [ + 66, + 544, + 543, + 570 + ], + "type": "text", + "content": " of a task " + }, + { + "bbox": [ + 66, + 544, + 543, + 570 + ], + "type": "inline_equation", + "content": "i" + }, + { + "bbox": [ + 66, + 544, + 543, + 570 + ], + "type": "text", + "content": ", the test statistic of the Wilcoxon signed-rank test is calculated as the minimum of the positive-rank sum " + }, + { + "bbox": [ + 66, + 544, + 543, + 570 + ], + "type": "inline_equation", + "content": "(W^{+})" + }, + { + "bbox": [ + 66, + 544, + 543, + 570 + ], + "type": "text", + "content": " and the negative-rank sum " + }, + { + "bbox": [ + 66, + 544, + 543, + 570 + ], + "type": "inline_equation", + "content": "(W^{-})" + }, + { + "bbox": [ + 66, + 544, + 543, + 570 + ], + "type": "text", + "content": "," + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 271, + 582, + 542, + 608 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 271, + 582, + 542, + 608 + ], + "spans": [ + { + "bbox": [ + 271, + 582, + 542, + 608 + ], + "type": 
"interline_equation", + "content": "W ^ {+} = \\sum_ {X _ {i} > 0} R _ {i} \\tag {1}", + "image_path": "096e92d1c27b2658abaac8299e572989ee024c86006f93ac8bf5b2c2c46548d4.jpg" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 271, + 620, + 542, + 647 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 271, + 620, + 542, + 647 + ], + "spans": [ + { + "bbox": [ + 271, + 620, + 542, + 647 + ], + "type": "interline_equation", + "content": "W ^ {-} = \\sum_ {X _ {i} < 0} R _ {i} \\tag {2}", + "image_path": "83c7e9c7794221b2cb751ba8e52d2d2e019f0563012fa1743854d7b8454bd866.jpg" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 66, + 656, + 543, + 704 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 66, + 656, + 543, + 704 + ], + "spans": [ + { + "bbox": [ + 66, + 656, + 543, + 704 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 66, + 656, + 543, + 704 + ], + "type": "inline_equation", + "content": "X_{i} = x_{i} - y_{i}" + }, + { + "bbox": [ + 66, + 656, + 543, + 704 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 66, + 656, + 543, + 704 + ], + "type": "inline_equation", + "content": "R_{i}" + }, + { + "bbox": [ + 66, + 656, + 543, + 704 + ], + "type": "text", + "content": " is the rank of " + }, + { + "bbox": [ + 66, + 656, + 543, + 704 + ], + "type": "inline_equation", + "content": "|x_{i} - y_{i}|" + }, + { + "bbox": [ + 66, + 656, + 543, + 704 + ], + "type": "text", + "content": ". In order to account for the differences in magnitudes for MAE and MSE metrics, we normalized all performances by the mean of the performances from both models. We also reversed the sign of MAEs and MSEs because lower MAEs and MSEs correspond to better performances." 
+ } + ] + } + ], + "index": 9 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 528, + 742, + 542, + 752 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 528, + 742, + 542, + 752 + ], + "spans": [ + { + "bbox": [ + 528, + 742, + 542, + 752 + ], + "type": "text", + "content": "32" + } + ] + } + ], + "index": 10 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 31 + }, + { + "para_blocks": [ + { + "bbox": [ + 68, + 119, + 323, + 130 + ], + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 119, + 323, + 130 + ], + "spans": [ + { + "bbox": [ + 68, + 119, + 323, + 130 + ], + "type": "text", + "content": "Table S.7 | Example of prompts for binary classification tasks." + } + ] + } + ], + "index": 0, + "type": "text" + }, + { + "bbox": [ + 76, + 148, + 334, + 158 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 76, + 148, + 334, + 158 + ], + "spans": [ + { + "bbox": [ + 76, + 148, + 334, + 158 + ], + "type": "text", + "content": "Instructions: Answer the following question about drug properties." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 75, + 158, + 534, + 189 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 75, + 158, + 534, + 189 + ], + "spans": [ + { + "bbox": [ + 75, + 158, + 534, + 189 + ], + "type": "text", + "content": "Context: As a membrane separating circulating blood and brain extracellular fluid, the blood-brain barrier (BBB) is the protection layer that blocks most foreign drugs. Thus the ability of a drug to penetrate the barrier to deliver to the site of action forms a crucial challenge in development of drugs for central nervous system." 
+ } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 76, + 190, + 302, + 199 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 76, + 190, + 302, + 199 + ], + "spans": [ + { + "bbox": [ + 76, + 190, + 302, + 199 + ], + "type": "text", + "content": "Question: Given a drug SMILES string, predict whether it" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 77, + 200, + 261, + 209 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 77, + 200, + 261, + 209 + ], + "spans": [ + { + "bbox": [ + 77, + 200, + 261, + 209 + ], + "type": "text", + "content": "(A) does not cross the BBB (B) crosses the BBB" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 77, + 210, + 313, + 220 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 77, + 210, + 313, + 220 + ], + "spans": [ + { + "bbox": [ + 77, + 210, + 313, + 220 + ], + "type": "text", + "content": "Drug SMILES: " + }, + { + "bbox": [ + 77, + 210, + 313, + 220 + ], + "type": "inline_equation", + "content": "\\mathrm{CN1C(=O)CN = C(C2 = CCCCC2)c2cc(Cl)ccc21}" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 77, + 221, + 132, + 231 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 77, + 221, + 132, + 231 + ], + "spans": [ + { + "bbox": [ + 77, + 221, + 132, + 231 + ], + "type": "text", + "content": "Answer: (B)" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 76, + 249, + 358, + 258 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 76, + 249, + 358, + 258 + ], + "spans": [ + { + "bbox": [ + 76, + 249, + 358, + 258 + ], + "type": "text", + "content": "Instructions: Answer the following question about peptide-MHC binding." 
+ } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 75, + 259, + 533, + 320 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 75, + 259, + 533, + 320 + ], + "spans": [ + { + "bbox": [ + 75, + 259, + 533, + 320 + ], + "type": "text", + "content": "Context: In the human body, T cells monitor the existing peptides and trigger an immune response if the peptide is foreign. To decide whether or not if the peptide is not foreign, the peptide must bind to a major histocompatibility complex (MHC) molecule. Therefore, predicting peptide-MHC binding affinity is pivotal for determining immunogenicity. In some experiments, the peptide binding is measured against cells that express multiple MHCs, so the peptide could be binding any one of the possible MHCs. Class 1 MHC molecules bind to peptides that are usually 8-14 amino acids long and activate CD8 T cells." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 75, + 322, + 533, + 342 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 75, + 322, + 533, + 342 + ], + "spans": [ + { + "bbox": [ + 75, + 322, + 533, + 342 + ], + "type": "text", + "content": "Question: Given the amino acid sequence of the peptide and possible pseudo amino acid sequences of MHC 1, predict whether the peptide" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 76, + 342, + 337, + 353 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 76, + 342, + 337, + 353 + ], + "spans": [ + { + "bbox": [ + 76, + 342, + 337, + 353 + ], + "type": "text", + "content": "(A) does not bind to any of the MHCs (B) binds to any of the MHCs" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 77, + 354, + 252, + 363 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 77, + 354, + 252, + 363 + ], + "spans": [ + { + "bbox": [ + 77, + 354, + 252, + 363 + ], + "type": "text", + "content": "Peptide amino acid sequence: QLADETLLKV" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 77, + 363, + 416, + 373 + 
], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 77, + 363, + 416, + 373 + ], + "spans": [ + { + "bbox": [ + 77, + 363, + 416, + 373 + ], + "type": "text", + "content": "Possible MHC pseudosequences: YFAMYGEKAVTHVDTLYVRYHYTTYEAWAYTWY" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 77, + 374, + 132, + 384 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 77, + 374, + 132, + 384 + ], + "spans": [ + { + "bbox": [ + 77, + 374, + 132, + 384 + ], + "type": "text", + "content": "Answer: (B)" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 76, + 402, + 380, + 411 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 76, + 402, + 380, + 411 + ], + "spans": [ + { + "bbox": [ + 76, + 402, + 380, + 411 + ], + "type": "text", + "content": "Instructions: Answer the following question about miRNA protein interactions." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 75, + 412, + 533, + 443 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 75, + 412, + 533, + 443 + ], + "spans": [ + { + "bbox": [ + 75, + 412, + 533, + 443 + ], + "type": "text", + "content": "Context: MicroRNAs (miRNAs) are, small non-coding RNAs with 18-25 nucleotides, which are central regulators at the post-transcriptional level in both animals and plants. Perfect or near-perfect complementary binding of miRNAs and their target mRNA negatively regulates gene expression by accelerating mRNA degradation or suppressing mRNA translation." 
+ } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 76, + 444, + 438, + 453 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 76, + 444, + 438, + 453 + ], + "spans": [ + { + "bbox": [ + 76, + 444, + 438, + 453 + ], + "type": "text", + "content": "Question: Given the miRNA mature sequence and target amino acid sequence, predict whether" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 76, + 454, + 370, + 464 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 76, + 454, + 370, + 464 + ], + "spans": [ + { + "bbox": [ + 76, + 454, + 370, + 464 + ], + "type": "text", + "content": "(A) the miRNA and target do not interact (B) the miRNA and target interact" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 77, + 464, + 282, + 474 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 77, + 464, + 282, + 474 + ], + "spans": [ + { + "bbox": [ + 77, + 464, + 282, + 474 + ], + "type": "text", + "content": "miRNA sequence: UUCCUGUCAGCCGUGGGUGCC" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 75, + 475, + 533, + 505 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 75, + 475, + 533, + 505 + ], + "spans": [ + { + "bbox": [ + 75, + 475, + 533, + 505 + ], + "type": "text", + "content": "Target amino acid sequence: MSVNMDELRHQVMINQFVLAAGCAADQAKQLLQAAHWQFETALSTFFQET-NIPNSHHHHQMMCTPSNTPATPPNFPDALAMFSKLRASEGLQSSNSPMTAAACSPANFSPFWASSPPSHQAPWIP-PSSPTTFHLHRPQPTWPPGAQQGGAQQKAMAAMDGQR" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 77, + 506, + 132, + 516 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 77, + 506, + 132, + 516 + ], + "spans": [ + { + "bbox": [ + 77, + 506, + 132, + 516 + ], + "type": "text", + "content": "Answer: (A)" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 76, + 534, + 324, + 544 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 76, + 534, + 324, + 544 + ], + "spans": [ + { + "bbox": [ + 76, + 534, + 324, + 544 + ], + "type": 
"text", + "content": "Instructions: Answer the following question about clinical trials." + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 75, + 544, + 533, + 606 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 75, + 544, + 533, + 606 + ], + "spans": [ + { + "bbox": [ + 75, + 544, + 533, + 606 + ], + "type": "text", + "content": "Context: Clinical trial is the most time and cost-consuming step in the drug discovery process. Phase 1 clinical trials test the safety and basic properties of a new drug or treatment in a small group of people for the first time. Optimizing and designing trials with machine learning could drastically lead to the speedup of delivery of life-saving therapeutics to patients. Clinical trial outcome prediction is a machine learning task that aims to forecast the outcome of clinical trials, such as the approval rate of a drug or treatment. It utilizes various clinical trial features, including the drug's molecular structure and patient disease." 
+ } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 76, + 607, + 377, + 617 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 76, + 607, + 377, + 617 + ], + "spans": [ + { + "bbox": [ + 76, + 607, + 377, + 617 + ], + "type": "text", + "content": "Question: Given a drug SMILES string and disease, predict if the phase 1 trial" + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 77, + 618, + 267, + 628 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 77, + 618, + 267, + 628 + ], + "spans": [ + { + "bbox": [ + 77, + 618, + 267, + 628 + ], + "type": "text", + "content": "(A) would not be approved (B) would be approved" + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 77, + 628, + 430, + 639 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 77, + 628, + 430, + 639 + ], + "spans": [ + { + "bbox": [ + 77, + 628, + 430, + 639 + ], + "type": "text", + "content": "Drug SMILES: COC1=NC(N)=NC2=C1N=CN2[C@@H]1O[C@H](CO)[C@@H](O)[C@@H]1O" + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 77, + 639, + 239, + 648 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 77, + 639, + 239, + 648 + ], + "spans": [ + { + "bbox": [ + 77, + 639, + 239, + 648 + ], + "type": "text", + "content": "Disease: Chronic myeloproliferative disease" + } + ] + } + ], + "index": 26 + }, + { + "bbox": [ + 77, + 649, + 132, + 659 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 77, + 649, + 132, + 659 + ], + "spans": [ + { + "bbox": [ + 77, + 649, + 132, + 659 + ], + "type": "text", + "content": "Answer: (A)" + } + ] + } + ], + "index": 27 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 528, + 742, + 541, + 751 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 528, + 742, + 541, + 751 + ], + "spans": [ + { + "bbox": [ + 528, + 742, + 541, + 751 + ], + "type": "text", + "content": "33" + } + ] + } + ], + "index": 28 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 32 + }, 
+ { + "para_blocks": [ + { + "bbox": [ + 68, + 140, + 346, + 152 + ], + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 140, + 346, + 152 + ], + "spans": [ + { + "bbox": [ + 68, + 140, + 346, + 152 + ], + "type": "text", + "content": "Table S.8 | Example of prompts for regression and generation tasks." + } + ] + } + ], + "index": 0, + "type": "text" + }, + { + "bbox": [ + 76, + 169, + 334, + 178 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 76, + 169, + 334, + 178 + ], + "spans": [ + { + "bbox": [ + 76, + 169, + 334, + 178 + ], + "type": "text", + "content": "Instructions: Answer the following question about drug properties." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 75, + 179, + 533, + 209 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 75, + 179, + 533, + 209 + ], + "spans": [ + { + "bbox": [ + 75, + 179, + 533, + 209 + ], + "type": "text", + "content": "Context: The human colon epithelial cancer cell line, Caco-2, is used as an in vitro model to simulate the human intestinal tissue. The experimental result on the rate of drug passing through the Caco-2 cells can approximate the rate at which the drug permeates through the human intestinal tissue." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 75, + 211, + 533, + 231 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 75, + 211, + 533, + 231 + ], + "spans": [ + { + "bbox": [ + 75, + 211, + 533, + 231 + ], + "type": "text", + "content": "Question: Given a drug SMILES string, predict its normalized Caco-2 cell effective permeability from 000 to 1000, where 000 is minimum permeability and 1000 is maximum permeability." 
+ } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 77, + 232, + 306, + 241 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 77, + 232, + 306, + 241 + ], + "spans": [ + { + "bbox": [ + 77, + 232, + 306, + 241 + ], + "type": "text", + "content": "Drug SMILES: " + }, + { + "bbox": [ + 77, + 232, + 306, + 241 + ], + "type": "inline_equation", + "content": "\\mathrm{O} = \\mathrm{C}(\\mathrm{O})\\mathrm{{COC}}\\left( { = \\mathrm{O}}\\right) \\mathrm{{Cc}}1\\text{ccc}\\mathrm{{cc}}1\\mathrm{{Nc}}1\\mathrm{{c}}\\left( \\mathrm{{Cl}}\\right) \\mathrm{{ccc}}1\\mathrm{{Cl}}" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 77, + 242, + 132, + 251 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 77, + 242, + 132, + 251 + ], + "spans": [ + { + "bbox": [ + 77, + 242, + 132, + 251 + ], + "type": "text", + "content": "Answer: 788" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 76, + 270, + 331, + 280 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 76, + 270, + 331, + 280 + ], + "spans": [ + { + "bbox": [ + 76, + 270, + 331, + 280 + ], + "type": "text", + "content": "Instructions: Answer the following question about drug responses." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 75, + 281, + 533, + 311 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 75, + 281, + 533, + 311 + ], + "spans": [ + { + "bbox": [ + 75, + 281, + 533, + 311 + ], + "type": "text", + "content": "Context: The same drug compound could have various levels of responses in different patients. To design drug for individual or a group with certain characteristics is the central goal of precision medicine. In experiments, IC50s of drugs were measured against cancer cell lines." 
+ } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 75, + 312, + 534, + 331 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 75, + 312, + 534, + 331 + ], + "spans": [ + { + "bbox": [ + 75, + 312, + 534, + 331 + ], + "type": "text", + "content": "Question: Given a drug SMILES string and a cell line description, predict the normalized drug sensitivity from 000 to 1000, where 000 is minimum drug sensitivity and 1000 is maximum drug sensitivity." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 76, + 332, + 386, + 342 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 76, + 332, + 386, + 342 + ], + "spans": [ + { + "bbox": [ + 76, + 332, + 386, + 342 + ], + "type": "text", + "content": "Drug SMILES: " + }, + { + "bbox": [ + 76, + 332, + 386, + 342 + ], + "type": "inline_equation", + "content": "\\mathrm{CN1C = C(C2 = CC = CC = C21) / C = C\\backslash 3 / C4 = C(C = CC = N4)NC3 = O}" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 76, + 343, + 312, + 353 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 76, + 343, + 312, + 353 + ], + "spans": [ + { + "bbox": [ + 76, + 343, + 312, + 353 + ], + "type": "text", + "content": "Cell line description: SNU-1, stomach cell sourced from cancer" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 77, + 354, + 132, + 362 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 77, + 354, + 132, + 362 + ], + "spans": [ + { + "bbox": [ + 77, + 354, + 132, + 362 + ], + "type": "text", + "content": "Answer: 615" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 76, + 381, + 364, + 391 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 76, + 381, + 364, + 391 + ], + "spans": [ + { + "bbox": [ + 76, + 381, + 364, + 391 + ], + "type": "text", + "content": "Instructions: Answer the following question about drug target interactions." 
+ } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 75, + 392, + 533, + 443 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 75, + 392, + 533, + 443 + ], + "spans": [ + { + "bbox": [ + 75, + 392, + 533, + 443 + ], + "type": "text", + "content": "Context: Drug-target binding is the physical interaction between a drug and a specific biological molecule, such as a protein or enzyme. This interaction is essential for the drug to exert its pharmacological effect. The strength of the drug-target binding is determined by the binding affinity, which is a measure of how tightly the drug binds to the target. Kd is the dissociation constant of a drug-target complex. It is the concentration of drug at which half of the drug-target complexes have dissociated. A lower Kd value indicates a stronger binding affinity." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 75, + 444, + 533, + 463 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 75, + 444, + 533, + 463 + ], + "spans": [ + { + "bbox": [ + 75, + 444, + 533, + 463 + ], + "type": "text", + "content": "Question: Given the target amino acid sequence and compound SMILES string, predict their normalized binding affinity Kd from 000 to 1000, where 000 is minimum Kd and 1000 is maximum Kd." 
+ } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 76, + 464, + 287, + 474 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 76, + 464, + 287, + 474 + ], + "spans": [ + { + "bbox": [ + 76, + 464, + 287, + 474 + ], + "type": "text", + "content": "Drug SMILES: " + }, + { + "bbox": [ + 76, + 464, + 287, + 474 + ], + "type": "inline_equation", + "content": "\\mathrm{O = S(=O)(O)c1cccc2ccc(Nc3cccccc)3c12}" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 75, + 475, + 534, + 505 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 75, + 475, + 534, + 505 + ], + "spans": [ + { + "bbox": [ + 75, + 475, + 534, + 505 + ], + "type": "text", + "content": "Target amino acid sequence: MATVQQLEGRWRLVDSKGFDEYMKELGVIALRKMGAMKPDCIITCDGKNLTIKTESTLKITTQFSCTLGEKFETTADGRKTQTVCNFTDGALVHQWEWDGKESTITRKLKDGLVVECVMNNVTCTRIYEKVE" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 77, + 506, + 132, + 515 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 77, + 506, + 132, + 515 + ], + "spans": [ + { + "bbox": [ + 77, + 506, + 132, + 515 + ], + "type": "text", + "content": "Answer: 397" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 76, + 534, + 310, + 544 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 76, + 534, + 310, + 544 + ], + "spans": [ + { + "bbox": [ + 76, + 534, + 310, + 544 + ], + "type": "text", + "content": "Instructions: Answer the following question about reactions." + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 75, + 544, + 533, + 586 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 75, + 544, + 533, + 586 + ], + "spans": [ + { + "bbox": [ + 75, + 544, + 533, + 586 + ], + "type": "text", + "content": "Context: Retrosynthesis is the process of finding a set of reactants that can synthesize a target molecule, i.e., product, which is a fundamental task in drug manufacturing. 
The target is recursively transformed into simpler precursor molecules until commercially available \"starting\" molecules are identified. In a data sample, there is only one product molecule, reactants can be one or multiple molecules." + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 76, + 586, + 381, + 597 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 76, + 586, + 381, + 597 + ], + "spans": [ + { + "bbox": [ + 76, + 586, + 381, + 597 + ], + "type": "text", + "content": "Question: Given a product SMILES string, predict the reactant SMILES string." + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 76, + 597, + 492, + 607 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 76, + 597, + 492, + 607 + ], + "spans": [ + { + "bbox": [ + 76, + 597, + 492, + 607 + ], + "type": "text", + "content": "Product SMILES: [CH2:12]1[C:7]2([CH2:6][CH2:5][O:15][CH2:1][CH2:8]2)[CH2:13][CH2:14][O:10][C:11]1=[O:17]" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 76, + 607, + 534, + 628 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 76, + 607, + 534, + 628 + ], + "spans": [ + { + "bbox": [ + 76, + 607, + 534, + 628 + ], + "type": "text", + "content": "Answer: [CH:1]12B[CH:5]([CH2:6][CH2:7][CH2:8]1)CCC2.[O:10]1[CH2:14][CH2:13][CH2:12] [CH2:11]1.[OH:15].[Na+].[OH:17]O.CI" + } + ] + } + ], + "index": 22 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 528, + 742, + 541, + 751 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 528, + 742, + 541, + 751 + ], + "spans": [ + { + "bbox": [ + 528, + 742, + 541, + 751 + ], + "type": "text", + "content": "34" + } + ] + } + ], + "index": 23 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 33 + }, + { + "para_blocks": [ + { + "bbox": [ + 67, + 168, + 542, + 194 + ], + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 168, + 542, + 194 + ], + "spans": [ + { + "bbox": [ + 67, + 168, + 542, + 194 + ], + "type": "text", + "content": "Table S.9 | 
Example of a 10-shot prompt for a binary classification task. Shots are selected from nearest neighbors in the combined training and validation set (not the test set)." + } + ] + } + ], + "index": 0, + "type": "text" + }, + { + "bbox": [ + 77, + 209, + 334, + 220 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 77, + 209, + 334, + 220 + ], + "spans": [ + { + "bbox": [ + 77, + 209, + 334, + 220 + ], + "type": "text", + "content": "Instructions: Answer the following question about drug properties." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 76, + 220, + 534, + 251 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 76, + 220, + 534, + 251 + ], + "spans": [ + { + "bbox": [ + 76, + 220, + 534, + 251 + ], + "type": "text", + "content": "Context: As a membrane separating circulating blood and brain extracellular fluid, the blood-brain barrier (BBB) is the protection layer that blocks most foreign drugs. Thus the ability of a drug to penetrate the barrier to deliver to the site of action forms a crucial challenge in development of drugs for central nervous system." 
+ } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 77, + 261, + 487, + 273 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 77, + 261, + 487, + 273 + ], + "spans": [ + { + "bbox": [ + 77, + 261, + 487, + 273 + ], + "type": "text", + "content": "Question: Given a drug SMILES string, predict whether it (A) does not cross the BBB (B) crosses the BBB" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 77, + 278, + 293, + 289 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 77, + 278, + 293, + 289 + ], + "spans": [ + { + "bbox": [ + 77, + 278, + 293, + 289 + ], + "type": "text", + "content": "Drug SMILES: " + }, + { + "bbox": [ + 77, + 278, + 293, + 289 + ], + "type": "inline_equation", + "content": "\\mathrm{CN1C(=O)CN = C(c2cccccc)2c2cc(Cl)ccc21}" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 77, + 290, + 125, + 300 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 77, + 290, + 125, + 300 + ], + "spans": [ + { + "bbox": [ + 77, + 290, + 125, + 300 + ], + "type": "text", + "content": "Answer: (B)" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 77, + 310, + 298, + 321 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 77, + 310, + 298, + 321 + ], + "spans": [ + { + "bbox": [ + 77, + 310, + 298, + 321 + ], + "type": "text", + "content": "Drug SMILES: " + }, + { + "bbox": [ + 77, + 310, + 298, + 321 + ], + "type": "inline_equation", + "content": "\\mathrm{CN1C(=O)CN = C(c2cccccc2F)c2cc(Cl)ccc21}" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 77, + 322, + 125, + 331 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 77, + 322, + 125, + 331 + ], + "spans": [ + { + "bbox": [ + 77, + 322, + 125, + 331 + ], + "type": "text", + "content": "Answer: (B)" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 77, + 341, + 290, + 352 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 77, + 341, + 290, + 352 + ], + "spans": [ + { + "bbox": [ + 77, + 341, + 
290, + 352 + ], + "type": "text", + "content": "Drug SMILES: " + }, + { + "bbox": [ + 77, + 341, + 290, + 352 + ], + "type": "inline_equation", + "content": "\\mathrm{CN1C(=S)CN = C(c2cccccc)2c2cc(Cl)ccc21}" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 77, + 353, + 125, + 362 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 77, + 353, + 125, + 362 + ], + "spans": [ + { + "bbox": [ + 77, + 353, + 125, + 362 + ], + "type": "text", + "content": "Answer: (B)" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 77, + 372, + 337, + 384 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 77, + 372, + 337, + 384 + ], + "spans": [ + { + "bbox": [ + 77, + 372, + 337, + 384 + ], + "type": "text", + "content": "Drug SMILES: CP(C)(=O)CN1C(=O)CN=C(c2cccccc2)c2cc(Cl)ccc21" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 77, + 384, + 125, + 394 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 77, + 384, + 125, + 394 + ], + "spans": [ + { + "bbox": [ + 77, + 384, + 125, + 394 + ], + "type": "text", + "content": "Answer: (B)" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 77, + 403, + 335, + 415 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 77, + 403, + 335, + 415 + ], + "spans": [ + { + "bbox": [ + 77, + 403, + 335, + 415 + ], + "type": "text", + "content": "Drug SMILES: " + }, + { + "bbox": [ + 77, + 403, + 335, + 415 + ], + "type": "inline_equation", + "content": "\\mathrm{CN1C(=O)CN = C(c2cccccc)2c2cc([N + ](=O)[O - ])ccc21}" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 77, + 415, + 125, + 425 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 77, + 415, + 125, + 425 + ], + "spans": [ + { + "bbox": [ + 77, + 415, + 125, + 425 + ], + "type": "text", + "content": "Answer: (B)" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 77, + 435, + 341, + 446 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 77, + 435, + 341, + 446 + ], + "spans": 
[ + { + "bbox": [ + 77, + 435, + 341, + 446 + ], + "type": "text", + "content": "Drug SMILES: CCN(CC)CCN1C(=O)CN=C(c2cccccc2F)c2cc(Cl)ccc21" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 77, + 447, + 125, + 456 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 77, + 447, + 125, + 456 + ], + "spans": [ + { + "bbox": [ + 77, + 447, + 125, + 456 + ], + "type": "text", + "content": "Answer: (B)" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 77, + 466, + 312, + 476 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 77, + 466, + 312, + 476 + ], + "spans": [ + { + "bbox": [ + 77, + 466, + 312, + 476 + ], + "type": "text", + "content": "Drug SMILES: " + }, + { + "bbox": [ + 77, + 466, + 312, + 476 + ], + "type": "inline_equation", + "content": "\\mathrm{O} = \\mathrm{C}1\\mathrm{{CN}} = \\mathrm{C}\\left( {c2\\text{ccc}cc2}\\right) c2\\mathrm{{cc}}\\left( \\mathrm{{Cl}}\\right) \\mathrm{{ccc}}2\\mathrm{\\;N}1\\mathrm{{CC}}1\\mathrm{{CC}}1" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 77, + 477, + 125, + 487 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 77, + 477, + 125, + 487 + ], + "spans": [ + { + "bbox": [ + 77, + 477, + 125, + 487 + ], + "type": "text", + "content": "Answer: (B)" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 77, + 497, + 312, + 508 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 77, + 497, + 312, + 508 + ], + "spans": [ + { + "bbox": [ + 77, + 497, + 312, + 508 + ], + "type": "text", + "content": "Drug SMILES: C#CCN1C(=O)CN=C(c2cccc2)c2cc(Cl)ccc21" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 77, + 509, + 125, + 518 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 77, + 509, + 125, + 518 + ], + "spans": [ + { + "bbox": [ + 77, + 509, + 125, + 518 + ], + "type": "text", + "content": "Answer: (B)" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 77, + 529, + 322, + 540 + ], + "type": "text", + "angle": 0, + 
"lines": [ + { + "bbox": [ + 77, + 529, + 322, + 540 + ], + "spans": [ + { + "bbox": [ + 77, + 529, + 322, + 540 + ], + "type": "text", + "content": "Drug SMILES: " + }, + { + "bbox": [ + 77, + 529, + 322, + 540 + ], + "type": "inline_equation", + "content": "\\mathrm{O} = \\mathrm{C}1\\mathrm{{CN}} = \\mathrm{C}\\left( {\\mathrm{c}2\\text{ccc} : 2}\\right) \\mathrm{c}2\\mathrm{{cc}}\\left( \\mathrm{{Cl}}\\right) \\mathrm{{ccc}}2\\mathrm{\\;N}1\\mathrm{{CC}}\\left( \\mathrm{F}\\right) \\left( \\mathrm{F}\\right) \\mathrm{F}" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 77, + 540, + 125, + 550 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 77, + 540, + 125, + 550 + ], + "spans": [ + { + "bbox": [ + 77, + 540, + 125, + 550 + ], + "type": "text", + "content": "Answer: (B)" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 77, + 560, + 360, + 571 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 77, + 560, + 360, + 571 + ], + "spans": [ + { + "bbox": [ + 77, + 560, + 360, + 571 + ], + "type": "text", + "content": "Drug SMILES: " + }, + { + "bbox": [ + 77, + 560, + 360, + 571 + ], + "type": "inline_equation", + "content": "\\mathrm{CCS}(\\mathrm{=O})(\\mathrm{=O})\\mathrm{CCN1C}(\\mathrm{=O})\\mathrm{CN} = \\mathrm{C}(\\mathrm{c2cccccc2F})\\mathrm{c2cc(Cl)ccc21}" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 77, + 571, + 125, + 581 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 77, + 571, + 125, + 581 + ], + "spans": [ + { + "bbox": [ + 77, + 571, + 125, + 581 + ], + "type": "text", + "content": "Answer: (B)" + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 77, + 588, + 313, + 599 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 77, + 588, + 313, + 599 + ], + "spans": [ + { + "bbox": [ + 77, + 588, + 313, + 599 + ], + "type": "text", + "content": "Drug SMILES: " + }, + { + "bbox": [ + 77, + 588, + 313, + 599 + ], + "type": "inline_equation", + "content": 
"\\mathrm{CN1C(=O)CN = C(C2 = CCCCCC2)c2cc(Cl)ccc21}" + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 77, + 600, + 132, + 609 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 77, + 600, + 132, + 609 + ], + "spans": [ + { + "bbox": [ + 77, + 600, + 132, + 609 + ], + "type": "text", + "content": "Answer: (B)" + } + ] + } + ], + "index": 25 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 528, + 742, + 541, + 751 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 528, + 742, + 541, + 751 + ], + "spans": [ + { + "bbox": [ + 528, + 742, + 541, + 751 + ], + "type": "text", + "content": "35" + } + ] + } + ], + "index": 26 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 34 + }, + { + "para_blocks": [ + { + "bbox": [ + 67, + 243, + 542, + 266 + ], + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 243, + 542, + 266 + ], + "spans": [ + { + "bbox": [ + 67, + 243, + 542, + 266 + ], + "type": "text", + "content": "Table S.10 | Example of prompts for predicting adverse events in clinical trials. The top prompt only provides drug SMILES strings while the bottom prompt also includes textual information about the clinical trial." + } + ] + } + ], + "index": 0, + "type": "text" + }, + { + "bbox": [ + 77, + 275, + 456, + 286 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 77, + 275, + 456, + 286 + ], + "spans": [ + { + "bbox": [ + 77, + 275, + 456, + 286 + ], + "type": "text", + "content": "From the following information about a clinical trial, predict whether it would have an adverse event." 
+ } + ] + } + ], + "index": 1 + }, + { + "type": "code", + "bbox": [ + 77, + 296, + 430, + 318 + ], + "blocks": [ + { + "bbox": [ + 77, + 296, + 430, + 318 + ], + "lines": [ + { + "bbox": [ + 77, + 296, + 430, + 318 + ], + "spans": [ + { + "bbox": [ + 77, + 296, + 430, + 318 + ], + "type": "text", + "content": "Drug: CC[C@H]1[C@@H](COC2=C3C=C(OC)C(=CC3=CC=N2)C(N)=O)NC(=O)[C@H]1F.[H][C@@]12CC[C@H](O)[C@@]1(C)CC[C@]1([H])C3=C(CC[C@@]21[H])C=C(O)C=C3" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "code_body" + } + ], + "index": 2, + "sub_type": "code", + "guess_lang": "javascript" + }, + { + "bbox": [ + 77, + 328, + 129, + 338 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 77, + 328, + 129, + 338 + ], + "spans": [ + { + "bbox": [ + 77, + 328, + 129, + 338 + ], + "type": "text", + "content": "Answer: No" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 76, + 369, + 456, + 380 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 76, + 369, + 456, + 380 + ], + "spans": [ + { + "bbox": [ + 76, + 369, + 456, + 380 + ], + "type": "text", + "content": "From the following information about a clinical trial, predict whether it would have an adverse event." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 76, + 390, + 534, + 422 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 76, + 390, + 534, + 422 + ], + "spans": [ + { + "bbox": [ + 76, + 390, + 534, + 422 + ], + "type": "text", + "content": "Title: A Study To Estimate The Effect of PF-06650833 On The Pharmacokinetics (PK) of Oral Contraceptive (OC) Summary: This is a Phase 1, open label, fixed sequence study of the effect of multiple dose PF-06650833 on single dose OC PK in healthy female subjects." 
+ } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 77, + 422, + 112, + 430 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 77, + 422, + 112, + 430 + ], + "spans": [ + { + "bbox": [ + 77, + 422, + 112, + 430 + ], + "type": "text", + "content": "Phase: 1" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 77, + 432, + 142, + 441 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 77, + 432, + 142, + 441 + ], + "spans": [ + { + "bbox": [ + 77, + 432, + 142, + 441 + ], + "type": "text", + "content": "Disease: Healthy" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 77, + 443, + 168, + 452 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 77, + 443, + 168, + 452 + ], + "spans": [ + { + "bbox": [ + 77, + 443, + 168, + 452 + ], + "type": "text", + "content": "Minimum age: 18 Years" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 77, + 453, + 169, + 463 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 77, + 453, + 169, + 463 + ], + "spans": [ + { + "bbox": [ + 77, + 453, + 169, + 463 + ], + "type": "text", + "content": "Maximum age: 60 Years" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 77, + 464, + 257, + 473 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 77, + 464, + 257, + 473 + ], + "spans": [ + { + "bbox": [ + 77, + 464, + 257, + 473 + ], + "type": "text", + "content": "Healthy volunteers: Accepts Healthy Volunteers" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 77, + 474, + 533, + 494 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 77, + 474, + 533, + 494 + ], + "spans": [ + { + "bbox": [ + 77, + 474, + 533, + 494 + ], + "type": "text", + "content": "Interventions: " + }, + { + "bbox": [ + 77, + 474, + 533, + 494 + ], + "type": "inline_equation", + "content": "400\\mathrm{mg}" + }, + { + "bbox": [ + 77, + 474, + 533, + 494 + ], + "type": "text", + "content": " by mouth (PO) Once daily (QD) for 11 days; Single dose of Oral 
tablet containing 30 ug EE and 150 ug of LN" + } + ] + } + ], + "index": 11 + }, + { + "type": "code", + "bbox": [ + 77, + 495, + 429, + 516 + ], + "blocks": [ + { + "bbox": [ + 77, + 495, + 429, + 516 + ], + "lines": [ + { + "bbox": [ + 77, + 495, + 429, + 516 + ], + "spans": [ + { + "bbox": [ + 77, + 495, + 429, + 516 + ], + "type": "text", + "content": "Drug: CC[C@H]1[C@@H](COC2=C3C=C(OC)C(=CC3=CC=N2)C(N)=O)NC(=O)[C@H]1F.[H][C@@]12CC[C@H](O)[C@@]1(C)CC[C@]1([H])C3=C(CC[C@@]21[H])C=C(O)C=C3" + } + ] + } + ], + "index": 12, + "angle": 0, + "type": "code_body" + } + ], + "index": 12, + "sub_type": "code", + "guess_lang": "javascript" + }, + { + "bbox": [ + 77, + 525, + 129, + 535 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 77, + 525, + 129, + 535 + ], + "spans": [ + { + "bbox": [ + 77, + 525, + 129, + 535 + ], + "type": "text", + "content": "Answer: No" + } + ] + } + ], + "index": 13 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 528, + 742, + 541, + 751 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 528, + 742, + 541, + 751 + ], + "spans": [ + { + "bbox": [ + 528, + 742, + 541, + 751 + ], + "type": "text", + "content": "36" + } + ] + } + ], + "index": 14 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 35 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 69, + 105, + 549, + 711 + ], + "blocks": [ + { + "bbox": [ + 68, + 87, + 479, + 99 + ], + "lines": [ + { + "bbox": [ + 68, + 87, + 479, + 99 + ], + "spans": [ + { + "bbox": [ + 68, + 87, + 479, + 99 + ], + "type": "text", + "content": "Table S.11 | Example of Agentic-Tx ChemBench chemical preference question answering." + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 69, + 105, + 549, + 711 + ], + "lines": [ + { + "bbox": [ + 69, + 105, + 549, + 711 + ], + "spans": [ + { + "bbox": [ + 69, + 105, + 549, + 711 + ], + "type": "table", + "html": "
Example of Chembench Chemical Preference
QuestionConsider the following two drug candidates: \nA. N#Cclc(NC(=O)c2cc([N+](=O)[O-])ccc2N2CCOCC2)sc2c1CCCC2 \nB. O=C(C=Cclc00001)c1cccc1. \nWhich is more preferable for further development?
Thought 1First, I need to obtain more information about each drug candidate. I will use the SMILES to Description tool to get detailed descriptions of each molecule from their SMILES strings, starting with candidate.
Action 1SMILES to Description \nInput SMILES: N#Cclc(NC(=O)c2cc([N+](=O)[O-])ccc2N2CCOCC2)sc2c1CCCC2
Observation 1PubChem CID: 3934361 \nMolecular Formula: C20H20N4O4S \nMolecular Weight: 412.5 \nCharge: 0 \nIUPAC Name: N-(3-cyano-4,5,6,7-tetrahydro-1-benzothiophen-2-yl)-2-morpholin-4-yl-5-nitrobenzamide XLogP: 3.8 \nSynonyms: MLS000335194, CHEMBL1549645, HMS2597A10, HMS3379H10, AKOS001044982, SMR000249952, SR-01000056848, SR-01000056848-1, Z27367728 \n(Summarized) The provided information describes a single molecule (PubChem CID 3934361) with a cyano-tetrahydro-benzothiophene core, a morpholino-nitrobenzamide substituent, and an XLogP of 3.8, indicating its lipophilicity.
Thought 2Now, I will use the SMILES to Description tool to get detailed descriptions of candidate B from its SMILES string. After that, I will compare the information obtained for both candidates to determine which is more preferable for further development.
Action 2SMILES to Description \nInput SMILES: O=C(C=Cclc00001)c1cccc1
Observation 2PubChem CID: 219207 \nMolecular Formula: C14H11NO \nMolecular Weight: 209.24 \nCharge: 0 \nIUPAC Name: 1-phenyl-3-pyridin-2-ylprop-2-en-1-one XLogP: 2.7 \nSynonyms: 3-(2-PYRIDYL)-ACRYLOPHENONE, MLS002637493, azachalcone, CHEMBL1717486, DTXSID601279307, HMS3079I05, SMR001547031, 1-Phenyl-3-(2-pyridinyl)-2-propen-1-one \n(Summary) The molecule (B) represented by the SMILES O=C(C=Cclc00001)c1cccc1, also known as 1-phenyl-3-pyridin-2-ylprop-2-en-1-one, has a molecular weight of 209.24 g/mol and a calculated XLogP value of 2.7.
Observation 2
Thought 3Based on the descriptions, candidate B (XLogP = 2.7) is less lipophilic than candidate A (XLogP = 3.8). Lower lipophilicity can often be associated with better absorption and distribution properties. I will now use the ClinicalTox tool to assess the clinical toxicity of candidate B.
Action 3ClinicalTox \nInput SMILES: O=C(C=Cclc00001)c1cccc1
Observation 3Context: Humans are exposed to a variety of chemicals through food, household products, and medicines, some of which can be toxic, leading to over 30% of promising pharmaceuticals failing in human trials due to toxicity. Toxic drugs can be identified from clinical trials that failed due to toxicity, while non-toxic drugs can be identified from FDA approval status or from clinical trials that report no toxicity. Prediction returned: O=C(C=Cclc00001)c1cccc1 is not toxic! \n(Summary) Based on the provided information, drug candidate B (O=C(C=Cclc00001)c1cccc1) is predicted to be non-toxic, suggesting it might be more preferable for further development.
Observation 3
Final ResponseCandidate B is more preferable for further development. Here's why: \n-Lipophilicity (XLogP): Candidate B has a lower XLogP value (2.7) compared to Candidate A (3.8). Lower lipophilicity is generally favored in drug development. Excessive lipophilicity (high XLogP) can lead to problems like poor solubility, increased risk of off-target binding and toxicity, and difficulties in achieving good absorption and distribution in the body. A value closer to the range of many oral drugs is preferred. \n-Toxicity: The clinical toxicity tool states that candidate B is predicted to be non-toxic.
", + "image_path": "b9d2298fd6339b4d44d87216064cb18663a965c2d62e1de82868a8b0f2abce73.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "table_body" + } + ], + "index": 1 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 528, + 742, + 541, + 751 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 528, + 742, + 541, + 751 + ], + "spans": [ + { + "bbox": [ + 528, + 742, + 541, + 751 + ], + "type": "text", + "content": "37" + } + ] + } + ], + "index": 2 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 36 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 74, + 161, + 537, + 653 + ], + "blocks": [ + { + "bbox": [ + 68, + 145, + 315, + 157 + ], + "lines": [ + { + "bbox": [ + 68, + 145, + 315, + 157 + ], + "spans": [ + { + "bbox": [ + 68, + 145, + 315, + 157 + ], + "type": "text", + "content": "Table S.12 | Descriptions of tools used by Agentic-Tx." + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 74, + 161, + 537, + 653 + ], + "lines": [ + { + "bbox": [ + 74, + 161, + 537, + 653 + ], + "spans": [ + { + "bbox": [ + 74, + 161, + 537, + 653 + ], + "type": "table", + "html": "
Tool NameDescription
ToxCastUses TxGemma to predict the toxicity of a given drug (SMILES string) in various ToxCast assays based on the provided context. Returns results indicating whether the drug is toxic or not in each selected assay.
ClinicalToxUses TxGemma to predict the clinical toxicity of a given drug (SMILES string) for humans. Returns a result indicating whether the drug is predicted to be toxic or not.
ChatAllows conversational interaction with TxGemma-Chat. Enables posing therapeutics-related questions and receiving responses.
MutagenicityUses TxGemma to predict whether a given drug (SMILES) is mutagenic based on the Ames test. Returns a result indicating if the drug is mutagenic or not.
IC50Uses TxGemma to predict the normalized IC50 between a drug (SMILES) and a target protein (amino acid sequence). Returns a IC50 value, with lower values suggesting potent inhibition.
Phase 1 TrialUses TxGemma to predict the approval outcome of a Phase 1 clinical trial for a drug (SMILES) against a specified disease. Returns a result indicating whether the trial would be approved or not.
Wikipedia SearchSearches Wikipedia for a given text query. Returns the top matching article's title, link, and a short summary.
PubMed SearchQueries PubMed for scientific articles based on a search text. Returns metadata (PMID, title, authors, journal, date, abstract) for the top few articles.
Web SearchPerforms a general web search. Returns titles, links, and snippets for the top search results.
HTML FetchFetched the raw HTML content of a given URL. Useful for inspecting webpage details.
SMILES to DescriptionRetrieves molecular information from PubChem for a given SMILES string. Returns properties like PubChem CID, molecular formula, IUPAC name, XLogP, and synonyms.
SMILES TherapyRetrieves therapeutic information (ChEMBL ID, mechanisms of action, drug indications, ATC classifications) for a drug given its SMILES string.
Molecule ToolProvides molecule-related functions: searching for compounds by name (returns properties and IDs) and converting between molecular representations (InChI, SMILES, InChIKey, Mol).
Molecule ConvertConverts a molecules representation from one type to another (e.g., SMILES to InChI).
Gene SequenceRetrieves amino acid sequences for a given gene name and organism. Searches NCBI Nucleotide, fetches records, and translates DNA to protein sequences.
Gene DescriptionRetrieves descriptive information about a gene from NCBI Gene, including official symbol, full name, description, and summary.
BlastPRuns a BLASTP search against NCBI databases for a given amino acid sequence. Returns hits with gene names, organisms, and accessions.
Protein DescriptionProvides descriptive information (organism, definition, accession) for a protein, either by name or amino acid sequence. Uses NCBI Protein database or BLASTP.
", + "image_path": "f505e8244b73734017b072226795a6c6f33623dd337bfbd862ee723a1ca44b5e.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "table_body" + } + ], + "index": 1 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 528, + 742, + 541, + 751 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 528, + 742, + 541, + 751 + ], + "spans": [ + { + "bbox": [ + 528, + 742, + 541, + 751 + ], + "type": "text", + "content": "38" + } + ] + } + ], + "index": 2 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 37 + }, + { + "para_blocks": [ + { + "bbox": [ + 67, + 72, + 194, + 83 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 72, + 194, + 83 + ], + "spans": [ + { + "bbox": [ + 67, + 72, + 194, + 83 + ], + "type": "text", + "content": "D Additional results" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 67, + 97, + 267, + 110 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 97, + 267, + 110 + ], + "spans": [ + { + "bbox": [ + 67, + 97, + 267, + 110 + ], + "type": "text", + "content": "D.1 TxGemma-Predict performance" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 67, + 118, + 544, + 324 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 118, + 544, + 324 + ], + "spans": [ + { + "bbox": [ + 67, + 118, + 544, + 324 + ], + "type": "text", + "content": "Figure S.4 compares TxGemma-27B-Predict with previous SOTA models, taking into account that Tx-LLM M achieved SOTA performance on many tasks. We provide detailed results tables for binary classification tasks in Table S.13 (comparing against specialist SOTA and base models) and Table S.15 (comparing against TxGemma-Chat and Tx-LLM), and for regression and generation tasks in Table S.14 (comparing against specialist SOTA and base models) and Table S.16 (comparing against TxGemma-Chat and Tx-LLM). 
Tables S.17 and S.18 list the performances of released TxGemma models trained only on datasets with commercial licenses. Figures S.5 and S.6 compares TxGemma-27B-Predict with LlaSMol and MolE, models specialized for small molecules, on small molecule tasks. Figure S.12 plots the percentage of tasks that contain contaminated datapoints overlapping with the Gemma-2 pretraining data, the percent of contaminated datapoints for these tasks, and Figure S.13 shows the results of TxGemma-27B-Predict after filtering contaminated datapoints out. We observe that most tasks have no contamination, and filtering these datapoints out does not negatively impact TxGemma-27B-Predict performance. Figure S.16 plots performances for particular feature types across multiple model sizes, showing that the integration of SMILES strings and textual information is consistent. Figure S.17 plots performances over all tasks for comparisons of model size and domain fine-tuning, showing that these variables are significant. Figure S.18 shows that TxGemma-27B-Predict toxicity and clinical trial approval predictions are correlated, likely because toxicity in an important component of trial approval. Figure S.11 plots the inference speed, normalized by the number of chips used for serving, for all model sizes." 
+ } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 67, + 333, + 455, + 346 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 333, + 455, + 346 + ], + "spans": [ + { + "bbox": [ + 67, + 333, + 455, + 346 + ], + "type": "text", + "content": "D.2 Conversing with TxGemma-27B-Predict and TxGemma-27B-Chat" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 67, + 354, + 544, + 499 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 354, + 544, + 499 + ], + "spans": [ + { + "bbox": [ + 67, + 354, + 544, + 499 + ], + "type": "text", + "content": "Figure S.8 illustrates an example of providing a prompt to TxGemma-27B-Predict that is not in the processed data format. TxGemma-27B-Predict is able to provide a coherent response in a manner similar to the general LLMs. Figure S.9 illustrates an example of first providing a prompt to TxGemma-27B-Predict in the processed format and asking follow-up questions in subsequent turns. In the second turn, instructing the model to not in the processed data format is able to elicit a reasonable but succinct response. However, the third turn leads to the model answering in the processed data format, highlighting the difficulty of multi-turn dialogue after training only on the processed TDC data. Figure S.7 plots the performance of TxGemma-27B-Chat on the MMLU benchmark in comparison with both Gemma-2-27B and TxGemma-27B-Predict. TxGemma-27B-Chat performs similarly to Gemma-2-27B on MMLU while TxGemma-27B-Predict scores much lower. Figure S.10 shows an example of using a specific prompting structure with TxGemma-27B-Chat to elicit reasoning on a more challenging task of clinical trial approval. If this prompting structure is not used, the model refuses to provide reasoning." 
+ } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 67, + 509, + 257, + 521 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 509, + 257, + 521 + ], + "spans": [ + { + "bbox": [ + 67, + 509, + 257, + 521 + ], + "type": "text", + "content": "D.3 Agentic-Tx Tool Use Analysis" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 67, + 529, + 542, + 566 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 529, + 542, + 566 + ], + "spans": [ + { + "bbox": [ + 67, + 529, + 542, + 566 + ], + "type": "text", + "content": "Figure S.14 shows the tool usage frequency for different benchmarks, illustrating that Agentic-Tx dynamically adjusts its tool usage to suit the problem. Figure S.15 shows the most frequent tools used per question for chemical preference questions, showing consistent usage of molecule-based tools." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 67, + 575, + 503, + 589 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 575, + 503, + 589 + ], + "spans": [ + { + "bbox": [ + 67, + 575, + 503, + 589 + ], + "type": "text", + "content": "D.4 Proof-of-concept use of TxGemma for end-to-end therapeutic development" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 67, + 596, + 542, + 657 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 596, + 542, + 657 + ], + "spans": [ + { + "bbox": [ + 67, + 596, + 542, + 657 + ], + "type": "text", + "content": "In Figure S.3, we illustrate a simplified example of how TxGemma might be helpful in identifying a drug for ovarian cancer. In this example, we chose to directly prompt TxGemma, rather than using Agentic-Tx, to strictly isolate potential information leakage introduced by web search, which is outside of our training data. This approach allows us to examine the model's inherent capabilities, though we acknowledge that a full agent-based workflow is a plausible extension." 
+ } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 67, + 661, + 542, + 721 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 661, + 542, + 721 + ], + "spans": [ + { + "bbox": [ + 67, + 661, + 542, + 721 + ], + "type": "text", + "content": "We initially use the DisGeNET prompt to identify an ovarian cancer-associated target gene from a short list of genes including PIK3CA, JAK2, RET. TxGemma-27B-Predict predicts that PIK3CA, a gene not found in the training set which is known to be mutated in ovarian cancer [2], has an association score of 0.7 with ovarian cancer. This association score is nearly 2.5 standard deviations above the mean score (" + }, + { + "bbox": [ + 67, + 661, + 542, + 721 + ], + "type": "inline_equation", + "content": "\\mu = 0.37" + }, + { + "bbox": [ + 67, + 661, + 542, + 721 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 67, + 661, + 542, + 721 + ], + "type": "inline_equation", + "content": "\\sigma = 0.13" + }, + { + "bbox": [ + 67, + 661, + 542, + 721 + ], + "type": "text", + "content": "), indicating a strong association. JAK2 and RET share an association score of 0.3 which is below" + } + ] + } + ], + "index": 9 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 528, + 742, + 541, + 752 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 528, + 742, + 541, + 752 + ], + "spans": [ + { + "bbox": [ + 528, + 742, + 541, + 752 + ], + "type": "text", + "content": "39" + } + ] + } + ], + "index": 10 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 38 + }, + { + "para_blocks": [ + { + "bbox": [ + 67, + 71, + 543, + 192 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 71, + 543, + 192 + ], + "spans": [ + { + "bbox": [ + 67, + 71, + 543, + 192 + ], + "type": "text", + "content": "the mean score. 
We then used TxGemma-27B-Predict to select a potential therapeutic from a molecule shortlist, prioritizing predicted " + }, + { + "bbox": [ + 67, + 71, + 543, + 192 + ], + "type": "inline_equation", + "content": "\\mathrm{IC}_{50}" + }, + { + "bbox": [ + 67, + 71, + 543, + 192 + ], + "type": "text", + "content": " against the E545K mutant (an oncogenic mutation [3]), toxicity, and clinical trial success. Our manually curated shortlist of drugs, unseen to the model during training, include two existing cancer therapies including alpelisib and afatinib and a novel molecule which we randomly generated. Both afatinib " + }, + { + "bbox": [ + 67, + 71, + 543, + 192 + ], + "type": "inline_equation", + "content": "(1.02\\mu \\mathrm{M}\\mathrm{IC}_{50})" + }, + { + "bbox": [ + 67, + 71, + 543, + 192 + ], + "type": "text", + "content": " and the novel molecule " + }, + { + "bbox": [ + 67, + 71, + 543, + 192 + ], + "type": "inline_equation", + "content": "(10.2\\mu \\mathrm{M}\\mathrm{IC}_{50})" + }, + { + "bbox": [ + 67, + 71, + 543, + 192 + ], + "type": "text", + "content": " exhibit high predicted " + }, + { + "bbox": [ + 67, + 71, + 543, + 192 + ], + "type": "inline_equation", + "content": "\\mathrm{IC}_{50}" + }, + { + "bbox": [ + 67, + 71, + 543, + 192 + ], + "type": "text", + "content": " values, suggesting weak inhibition. 
However, alpelisib has a predicted " + }, + { + "bbox": [ + 67, + 71, + 543, + 192 + ], + "type": "inline_equation", + "content": "\\mathrm{IC}_{50}" + }, + { + "bbox": [ + 67, + 71, + 543, + 192 + ], + "type": "text", + "content": " of " + }, + { + "bbox": [ + 67, + 71, + 543, + 192 + ], + "type": "inline_equation", + "content": "30~\\mathrm{nM}" + }, + { + "bbox": [ + 67, + 71, + 543, + 192 + ], + "type": "text", + "content": ", suggestive of potent inhibition and relatively close to the experimental value of " + }, + { + "bbox": [ + 67, + 71, + 543, + 192 + ], + "type": "inline_equation", + "content": "5\\mathrm{nM}" + }, + { + "bbox": [ + 67, + 71, + 543, + 192 + ], + "type": "text", + "content": " suggested by Chen et al. [4] and Fritsch et al. [5]. TxGemma-27B-Predict also predicts that alpelisib is not mutagenic and would pass a phase 1 clinical trial for ovarian cancer. This iterative evaluation also corroborated by existing evidence: alpelisib is approved for breast cancer [6] and has shown activity in ovarian cancer [7, 8, 9]." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 67, + 196, + 541, + 293 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 196, + 541, + 293 + ], + "spans": [ + { + "bbox": [ + 67, + 196, + 541, + 293 + ], + "type": "text", + "content": "This workflow demonstrates a proof-of-concept for TxGemma's application in automating and optimizing therapeutic selection. We anticipate an agentic system capable of generating comprehensive lists of potential therapies and gene-disease associations paired with TxGemma would enable rapid prioritization and filtering, helping in reducing the candidate pool and accelerating the transition to preclinical studies. However, it's crucial to acknowledge the limitations of this demonstration. Clinical trial predictions are limited to Phase 1 success, and mutagenicity predictions do not encompass all aspects of small molecule toxicity. 
Future work should include experimental validation of TxGemma predictions and consideration of additional toxicity factors, such as hematologic toxicity, which were not included in our data." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 78, + 316, + 193, + 336 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 78, + 316, + 193, + 336 + ], + "spans": [ + { + "bbox": [ + 78, + 316, + 193, + 336 + ], + "type": "text", + "content": "TxGemma: Gene-Disease Association" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 78, + 342, + 195, + 371 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 78, + 342, + 195, + 371 + ], + "spans": [ + { + "bbox": [ + 78, + 342, + 195, + 371 + ], + "type": "text", + "content": "Q: Predict association from O to 1 between the following gene and malignant neoplasm of ovary." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 79, + 374, + 149, + 381 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 79, + 374, + 149, + 381 + ], + "spans": [ + { + "bbox": [ + 79, + 374, + 149, + 381 + ], + "type": "text", + "content": "PIK3CA:MPPRPSSGELW" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 79, + 388, + 182, + 410 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 79, + 388, + 182, + 410 + ], + "spans": [ + { + "bbox": [ + 79, + 388, + 182, + 410 + ], + "type": "text", + "content": "A: PI3KCA has an association score of 0.7. 
0.7 is 2.5a above the mean score" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 79, + 430, + 182, + 451 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 79, + 430, + 182, + 451 + ], + "spans": [ + { + "bbox": [ + 79, + 430, + 182, + 451 + ], + "type": "text", + "content": "TxGemma: Drug-Target Interaction" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 79, + 456, + 208, + 477 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 79, + 456, + 208, + 477 + ], + "spans": [ + { + "bbox": [ + 79, + 456, + 208, + 477 + ], + "type": "text", + "content": "Q: Given the following gene, predict how effectively the drug will inhibit it." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 79, + 483, + 170, + 497 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 79, + 483, + 170, + 497 + ], + "spans": [ + { + "bbox": [ + 79, + 483, + 170, + 497 + ], + "type": "text", + "content": "PI3KCA E545K: MPPRSPSGELW... \nAlpelisib: C1-Cc(SC(-N)NC...)" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 80, + 504, + 159, + 517 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 80, + 504, + 159, + 517 + ], + "spans": [ + { + "bbox": [ + 80, + 504, + 159, + 517 + ], + "type": "text", + "content": "A: Alpelisib has a IC5O of 30 nM." 
+ } + ] + } + ], + "index": 9 + }, + { + "type": "image", + "bbox": [ + 215, + 331, + 410, + 475 + ], + "blocks": [ + { + "bbox": [ + 215, + 331, + 410, + 475 + ], + "lines": [ + { + "bbox": [ + 215, + 331, + 410, + 475 + ], + "spans": [ + { + "bbox": [ + 215, + 331, + 410, + 475 + ], + "type": "image", + "image_path": "229596d5615d8db9e940ffb05b38f2d2da4d2620458f30f93387cd1de47e9957.jpg" + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 67, + 561, + 543, + 657 + ], + "lines": [ + { + "bbox": [ + 67, + 561, + 543, + 657 + ], + "spans": [ + { + "bbox": [ + 67, + 561, + 543, + 657 + ], + "type": "text", + "content": "Figure S.3 | Proof-of-concept example of applying TxGemma to end-to-end therapeutic development. TxGemma is used to suggest a therapeutic for ovarian cancer by first identifying PIK3CA as an associated gene target from a list of possible genes. Then, from a list of candidate therapeutics, TxGemma predicts that alpelisib (a molecule previously unseen to TxGemma that has shown activity against ovarian cancer and is approved for breast cancer) would bind the E545K mutant of PIK3CA, that it would not be toxic/mutagenic, and that it would be approved in a clinical trial. Note that this example serves as a proof-of-concept demonstration and does not account for all aspects of efficacy, toxicity, or trial approval. Rigorous experimental validation of TxGemma predictions to completely new therapeutics is also a critical step to evaluating TxGemma and remains an area of future work." 
+ } + ] + } + ], + "index": 19, + "angle": 0, + "type": "image_caption" + } + ], + "index": 10 + }, + { + "bbox": [ + 415, + 317, + 509, + 338 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 415, + 317, + 509, + 338 + ], + "spans": [ + { + "bbox": [ + 415, + 317, + 509, + 338 + ], + "type": "text", + "content": "TxGemma: Clinical Trial Approval" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 415, + 342, + 534, + 371 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 415, + 342, + 534, + 371 + ], + "spans": [ + { + "bbox": [ + 415, + 342, + 534, + 371 + ], + "type": "text", + "content": "Q: Predict whether the following drug will pass a phase I clinical trial against malignant neoplasm of ovary." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 415, + 377, + 496, + 384 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 415, + 377, + 496, + 384 + ], + "spans": [ + { + "bbox": [ + 415, + 377, + 496, + 384 + ], + "type": "text", + "content": "Alpelisib:CC1=C(SC(=N1)NC..." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 415, + 392, + 443, + 404 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 415, + 392, + 443, + 404 + ], + "spans": [ + { + "bbox": [ + 415, + 392, + 443, + 404 + ], + "type": "text", + "content": "A: Approved." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 416, + 430, + 496, + 451 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 416, + 430, + 496, + 451 + ], + "spans": [ + { + "bbox": [ + 416, + 430, + 496, + 451 + ], + "type": "text", + "content": "TxGemma: Toxicity Prediction" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 416, + 457, + 514, + 478 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 416, + 457, + 514, + 478 + ], + "spans": [ + { + "bbox": [ + 416, + 457, + 514, + 478 + ], + "type": "text", + "content": "Q: Predict whether the following drug is mutagenic." 
+ } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 416, + 483, + 498, + 491 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 416, + 483, + 498, + 491 + ], + "spans": [ + { + "bbox": [ + 416, + 483, + 498, + 491 + ], + "type": "text", + "content": "Alpelisib:CC1=C(SC(=N1)NC..." + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 416, + 497, + 457, + 511 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 416, + 497, + 457, + 511 + ], + "spans": [ + { + "bbox": [ + 416, + 497, + 457, + 511 + ], + "type": "text", + "content": "A: Not mutagenic." + } + ] + } + ], + "index": 18 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 528, + 742, + 541, + 751 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 528, + 742, + 541, + 751 + ], + "spans": [ + { + "bbox": [ + 528, + 742, + 541, + 751 + ], + "type": "text", + "content": "40" + } + ] + } + ], + "index": 20 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 39 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 118, + 70, + 476, + 246 + ], + "blocks": [ + { + "bbox": [ + 118, + 70, + 476, + 246 + ], + "lines": [ + { + "bbox": [ + 118, + 70, + 476, + 246 + ], + "spans": [ + { + "bbox": [ + 118, + 70, + 476, + 246 + ], + "type": "image", + "image_path": "42c41199fcf8e1c1b61be220eadee67478dcac28dec9f92bb963ec83de23b969.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + } + ], + "index": 0 + }, + { + "type": "image", + "bbox": [ + 118, + 255, + 475, + 449 + ], + "blocks": [ + { + "bbox": [ + 118, + 255, + 475, + 449 + ], + "lines": [ + { + "bbox": [ + 118, + 255, + 475, + 449 + ], + "spans": [ + { + "bbox": [ + 118, + 255, + 475, + 449 + ], + "type": "image", + "image_path": "0fc6f2350e51b7e6cd5a7bbc1317a452740c03d27b918c2dcd047504fcfadd35.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + } + ], + "index": 1 + }, + { + "type": "image", + "bbox": [ + 118, + 455, + 475, + 620 + ], + "blocks": [ 
+ { + "bbox": [ + 118, + 455, + 475, + 620 + ], + "lines": [ + { + "bbox": [ + 118, + 455, + 475, + 620 + ], + "spans": [ + { + "bbox": [ + 118, + 455, + 475, + 620 + ], + "type": "image", + "image_path": "d6c50205b6b2c913f7c73865098c30a77a468c56c5c6ec1feffbffeeab77100e.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 190, + 628, + 247, + 635 + ], + "lines": [ + { + "bbox": [ + 190, + 628, + 247, + 635 + ], + "spans": [ + { + "bbox": [ + 190, + 628, + 247, + 635 + ], + "type": "text", + "content": "Multi-instance tasks" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 342, + 628, + 444, + 635 + ], + "lines": [ + { + "bbox": [ + 342, + 628, + 444, + 635 + ], + "spans": [ + { + "bbox": [ + 342, + 628, + 444, + 635 + ], + "type": "text", + "content": "Single-instance and generative tasks" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 67, + 651, + 542, + 723 + ], + "lines": [ + { + "bbox": [ + 67, + 651, + 542, + 723 + ], + "spans": [ + { + "bbox": [ + 67, + 651, + 542, + 723 + ], + "type": "text", + "content": "Figure S.4 | Performance of TxGemma-27B-Predict compared to generalist and specialist SOTA models (top) The median relative change in performance of TxGemma-27B-Predict compared to Tx-LLM M. (middle) The median relative change in performance of TxGemma-27B-Predict compared to specialist SOTA models. (bottom) The median relative change in performance of TxGemma-27B-Predict compared to all SOTA models, including both Tx-LLM M and specialist models. Multi-instance tasks indicate tasks that involve multiple features, whereas single-instance tasks only involve one feature. The tasks within each task type are defined in Tables S.2 and S.3." 
+ } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_caption" + } + ], + "index": 2 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 528, + 742, + 541, + 751 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 528, + 742, + 541, + 751 + ], + "spans": [ + { + "bbox": [ + 528, + 742, + 541, + 751 + ], + "type": "text", + "content": "41" + } + ] + } + ], + "index": 6 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 40 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 189, + 132, + 272, + 251 + ], + "blocks": [ + { + "bbox": [ + 189, + 132, + 272, + 251 + ], + "lines": [ + { + "bbox": [ + 189, + 132, + 272, + 251 + ], + "spans": [ + { + "bbox": [ + 189, + 132, + 272, + 251 + ], + "type": "image", + "image_path": "d64008d7bba648430f646cee132bb06a7242ca81f3335858adf4a949d8bac876.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 67, + 268, + 544, + 304 + ], + "lines": [ + { + "bbox": [ + 67, + 268, + 544, + 304 + ], + "spans": [ + { + "bbox": [ + 67, + 268, + 544, + 304 + ], + "type": "text", + "content": "Figure S.5 | TxGemma performs comparably to LlaSMol on small molecule tasks. Accuracy is reported for binary classification tasks, and RMSE is reported for regression tasks. BBBP corresponds to BBB Martins in TDC tasks, ESOL corresponds to Solubility AqSolDB, and Lipo corresponds to Lipophilicity AstraZeneca." 
+ } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "type": "image", + "bbox": [ + 277, + 133, + 416, + 248 + ], + "blocks": [ + { + "bbox": [ + 277, + 133, + 416, + 248 + ], + "lines": [ + { + "bbox": [ + 277, + 133, + 416, + 248 + ], + "spans": [ + { + "bbox": [ + 277, + 133, + 416, + 248 + ], + "type": "image", + "image_path": "9d4476e7572c6c23be88f9ba85d88ce520cdc474eb566aa40abb2c1e2e6cb7ea.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + } + ], + "index": 1 + }, + { + "type": "image", + "bbox": [ + 72, + 437, + 192, + 591 + ], + "blocks": [ + { + "bbox": [ + 72, + 437, + 192, + 591 + ], + "lines": [ + { + "bbox": [ + 72, + 437, + 192, + 591 + ], + "spans": [ + { + "bbox": [ + 72, + 437, + 192, + 591 + ], + "type": "image", + "image_path": "e6c13c82f9aad77ffe02614d6eac77afddf8ff42b5e2ab8a220b12c5e64a976b.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 67, + 619, + 544, + 656 + ], + "lines": [ + { + "bbox": [ + 67, + 619, + 544, + 656 + ], + "spans": [ + { + "bbox": [ + 67, + 619, + 544, + 656 + ], + "type": "text", + "content": "Figure S.6 | TxGemma performs comparably to MolE on small molecule tasks. Comparison of MolE with TxGemma-27B-Predict on TDC tasks, separated by metric type (MAE, AUROC, Spearman correlation, and AUPRC). TxGemma-27B-Predict performs better than MolE on 10 out of 22 tasks." 
+ } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_caption" + } + ], + "index": 3 + }, + { + "type": "image", + "bbox": [ + 187, + 437, + 304, + 577 + ], + "blocks": [ + { + "bbox": [ + 187, + 437, + 304, + 577 + ], + "lines": [ + { + "bbox": [ + 187, + 437, + 304, + 577 + ], + "spans": [ + { + "bbox": [ + 187, + 437, + 304, + 577 + ], + "type": "image", + "image_path": "3485a458e0e83c0575b63d1e81dbaa9c3d70d585063afc9a1cb9ef52ff9240c6.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_body" + } + ], + "index": 4 + }, + { + "type": "image", + "bbox": [ + 304, + 437, + 421, + 591 + ], + "blocks": [ + { + "bbox": [ + 304, + 437, + 421, + 591 + ], + "lines": [ + { + "bbox": [ + 304, + 437, + 421, + 591 + ], + "spans": [ + { + "bbox": [ + 304, + 437, + 421, + 591 + ], + "type": "image", + "image_path": "09534e3b6d118249db6e3edff3bfb85233449f57161dcacc055ff510acfaca7c.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_body" + } + ], + "index": 5 + }, + { + "type": "image", + "bbox": [ + 419, + 437, + 538, + 578 + ], + "blocks": [ + { + "bbox": [ + 419, + 437, + 538, + 578 + ], + "lines": [ + { + "bbox": [ + 419, + 437, + 538, + 578 + ], + "spans": [ + { + "bbox": [ + 419, + 437, + 538, + 578 + ], + "type": "image", + "image_path": "c48b857842aeac69278aa4332949b757fe4bc93f85a7901073f2eedfb083c3c4.jpg" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_body" + } + ], + "index": 6 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 528, + 742, + 541, + 751 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 528, + 742, + 541, + 751 + ], + "spans": [ + { + "bbox": [ + 528, + 742, + 541, + 751 + ], + "type": "text", + "content": "42" + } + ] + } + ], + "index": 8 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 41 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 37, + 187, + 573, + 639 + ], + "blocks": [ + { + "bbox": [ + 67, + 160, + 541, + 184 + ], + "lines": [ + { + "bbox": 
[ + 67, + 160, + 541, + 184 + ], + "spans": [ + { + "bbox": [ + 67, + 160, + 541, + 184 + ], + "type": "text", + "content": "Table S.13 | Model performance on binary classification tasks. TxGemma-Predict and Gemma-2 performances compared with specialist SOTA for each binary classification task, along with the metric type." + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 37, + 187, + 573, + 639 + ], + "lines": [ + { + "bbox": [ + 37, + 187, + 573, + 639 + ], + "spans": [ + { + "bbox": [ + 37, + 187, + 573, + 639 + ], + "type": "table", + "html": "
Task NameMetricSpecialist SOTAGemma-2-2BGemma-2-9BGemma-2-27BTxGemma-2B-PredictTxGemma-9B-PredictTxGemma-27B-Predict
AMESAUROC0.871 [10]0.4870.6050.5080.7960.7980.816
BBB MartinsAUROC0.915 [11]0.2500.6450.5460.8640.8740.907
Bioavailability MaAUROC0.748 [12]0.4790.5840.5790.7150.6550.696
CYP1A2 VeithAUPRC0.900 [13]0.3880.5330.5620.9100.9160.922
CYP2C19 VeithAUROC0.890 [13]0.4560.5950.6190.9050.9060.899
CYP2C9 Substrate CarbonMangelsAUPRC0.441 [10]0.2930.3360.3670.4570.4680.427
CYP2C9 VeithAUPRC0.839 [14]0.2830.3740.4170.8010.7990.798
CYP2D6 Substrate CarbonMangelsAUPRC0.736 [14]0.2330.3290.3860.6050.6030.706
CYP2D6 VeithAUPRC0.739 [14]0.1450.1660.1850.6370.6640.681
CYP3A4 Substrate CarbonMangelsAUROC0.662 [15]0.5140.5850.5960.6690.6220.690
CYP3A4 VeithAUPRC0.904 [14]0.4270.5310.5350.8440.8390.854
Carcinogens LaguninAccuracy0.770 [16]0.2500.2860.3390.8210.8390.857
ClinToxAUROC0.948 [17]0.4370.4820.4240.8100.8310.888
DILIAUROC0.925 [10]0.3200.6510.6270.8750.8480.887
HIA HouAUROC0.988 [18]0.2570.9320.7830.9370.9670.988
HIVAUROC0.851 [19]0.4910.4950.5370.7370.7340.764
HuRIAUPRC0.724 [20]0.4960.4840.5260.7510.7790.799
MHC1 IEDB IMGT NielsenAUROC0.986 [21]0.4980.5040.5170.9100.9270.929
MHC2 IEDB JensenAUROC0.940 [22]0.4980.5260.5440.8120.8500.851
PAMPA NCATSAUROC0.900 [23]0.4650.5830.5440.6420.6710.705
Pgp BroccatelliAUROC0.935 [10]0.4160.6700.4970.9000.9110.936
SARSCOV2 3CLPro DiamondAUROC0.800 [24]0.3010.3880.4770.7330.7080.769
SARSCoV2 Vitro TouretAUROC0.640 [25]0.5680.6110.4790.6500.6680.598
SAbDab ChenAUPRC0.510 [26]0.5320.6960.7010.6760.8070.767
Skin ReactionAUROC0.840 [27]0.4290.5460.4930.6710.6480.708
Tox21AUROC0.961 [28]0.3580.4360.4970.8810.8960.893
ToxCastAUROC0.777 [17]0.4850.5120.5580.7840.7670.800
butkiewiczAUROC0.840 [29]0.4570.4910.4910.7910.7720.831
hERGAUROC0.874 [12]0.5380.6390.5000.8760.8810.884
hERG KarimAccuracy0.770 [30]0.5290.5320.5220.7780.7940.774
herg centralAUROC0.860 [31]0.4810.5110.5170.8800.8610.896
miRTarBaseAccuracy0.804 [32]0.4980.5010.4980.8050.8290.801
phase1AUROC0.576 [33]0.5620.5620.5530.6420.6350.622
phase2AUROC0.645 [33]0.5430.5710.5310.6650.6680.676
phase3AUROC0.723 [33]0.5590.5670.5590.7310.7290.739
weberAUROC0.870 [34]0.4660.5860.4690.7300.7270.749
", + "image_path": "098d8717f5dbde8ee0685821cf521f28993021a44eb08614ac24af4103f4c735.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "table_body" + } + ], + "index": 1 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 528, + 742, + 541, + 751 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 528, + 742, + 541, + 751 + ], + "spans": [ + { + "bbox": [ + 528, + 742, + 541, + 751 + ], + "type": "text", + "content": "43" + } + ] + } + ], + "index": 2 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 42 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 56, + 230, + 555, + 609 + ], + "blocks": [ + { + "bbox": [ + 67, + 189, + 542, + 224 + ], + "lines": [ + { + "bbox": [ + 67, + 189, + 542, + 224 + ], + "spans": [ + { + "bbox": [ + 67, + 189, + 542, + 224 + ], + "type": "text", + "content": "Table S.14 | Model performance on regression and generation tasks. TxGemma-Predict and Gemma-2 performances compared with specialist SOTA for each regression and generation task, along with the metric type. Tasks for which we did not find a specialist SOTA value are indicated with N/A." + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 56, + 230, + 555, + 609 + ], + "lines": [ + { + "bbox": [ + 56, + 230, + 555, + 609 + ], + "spans": [ + { + "bbox": [ + 56, + 230, + 555, + 609 + ], + "type": "table", + "html": "
Task NameMetricSpecialist SOTAGemma-2-2BGemma-2-9BGemma-2-27BTxGemma-2B-PredictTxGemma-9B-PredictTxGemma-27B-Predict
BindingDB PatentPCC0.588 [35]-0.066-0.0390.0300.4220.5240.538
BindingDB ic50Spearman0.637 [36]0.0010.0020.0440.3990.3980.445
BindingDB kdPCC0.712 [37]0.197-0.0090.1190.3520.3700.456
BindingDB kiPCC0.840 [38]-0.018-0.053-0.0270.6610.7370.676
Buchwald HartwigPCC0.786 [39]0.5280.6360.6840.8610.9150.910
Caco2 WangMAE0.285 [18]1.0570.5330.6180.4760.3730.401
Clearance Hepatocyte AZSpearman0.440 [40]0.1410.1630.2140.3530.3380.259
Clearance Microsome AZSpearman0.625 [18]0.2390.3250.2940.4680.6230.462
DAVISMSE0.219 [41]2.7059.0544.4730.6010.5870.555
DisGeNETMAEN/A0.2940.2950.2770.0570.0540.054
DrugComb BlissMAE4.560 [42]8.2137.4136.4564.2304.3374.156
DrugComb CSSMAE16.858 [42]36.84733.83722.61415.75216.48015.000
DrugComb HSAMAE4.453 [42]7.4587.3656.6704.2314.3354.209
DrugComb LoeweMAE9.184 [42]13.87313.36914.73117.34218.66517.336
DrugComb ZIPMAE4.027 [42]8.5886.2265.4043.9503.9043.807
GDSC1PCC0.860 [43]-0.0410.0730.0930.8760.5450.892
GDSC2PCC0.860 [43]-0.043-0.0370.0860.8240.5390.912
Half Life ObachSpearman0.547 [44]0.2880.2840.4850.3860.4940.458
KIBAMSE0.154 [41]2.8871.9252.0160.5880.5480.633
LD50 ZhuMAE0.552 [18]1.9710.8960.8740.7100.6300.628
LeenaySpearman0.740 [45]0.0850.0910.1460.0970.0670.276
Lipophilicity AstraZenecaMAE0.467 [46]1.5061.2071.0320.6100.5650.539
OncoPolyPharmacologyPCC0.730 [47]-0.0400.0640.0720.4730.5180.540
PPBR AZMAE7.788 [46]10.8369.7689.8799.2668.8899.029
Protein SAbDabMAEN/A1.2801.1701.1631.0661.1061.210
Solubility AqSolDBMAE0.761 [46]4.2142.5493.0960.9610.8680.821
TAPMAEN/A5.0084.2413.9585.3014.4734.280
USPTOAccuracy0.415 [48]0.0000.0010.0000.2870.0970.084
USPTO YieldsPCC0.361 [39]-0.0150.0260.0640.0110.0310.395
VDss LombardoSpearman0.627 [49]0.1000.4130.3540.5640.6070.560
", + "image_path": "f86eb6e369466b452142128bdecbe6aacee09469cd8f80397c5d297b32679576.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "table_body" + } + ], + "index": 1 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 528, + 742, + 541, + 752 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 528, + 742, + 541, + 752 + ], + "spans": [ + { + "bbox": [ + 528, + 742, + 541, + 752 + ], + "type": "text", + "content": "44" + } + ] + } + ], + "index": 2 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 43 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 44, + 185, + 567, + 630 + ], + "blocks": [ + { + "bbox": [ + 67, + 156, + 541, + 178 + ], + "lines": [ + { + "bbox": [ + 67, + 156, + 541, + 178 + ], + "spans": [ + { + "bbox": [ + 67, + 156, + 541, + 178 + ], + "type": "text", + "content": "Table S.15 | Model performance on binary classification tasks. TxGemma-Predict, TxGemma-Chat, and Tx-LLM performances for each binary classification task, along with the metric type." + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 44, + 185, + 567, + 630 + ], + "lines": [ + { + "bbox": [ + 44, + 185, + 567, + 630 + ], + "spans": [ + { + "bbox": [ + 44, + 185, + 567, + 630 + ], + "type": "table", + "html": "
Task NameMetricTxGemma-9B-PredictTxGemma-27B-PredictTxGemma-9B-ChatTxGemma-27B-ChatTx-LLM STx-LLM M
AMESAUROC0.7980.8160.7210.7330.7850.786
BBB MartinsAUROC0.8740.9070.8110.8610.8050.882
Bioavailability MaAUROC0.6550.6960.6200.6590.6050.702
CYP1A2 VeithAUPRC0.9160.9220.8390.8230.9060.914
CYP2C19 VeithAUROC0.9060.8990.8370.8280.8770.895
CYP2C9 Substrate CarbonMangelsAUPRC0.4680.4270.3820.4270.4030.436
CYP2C9 VeithAUPRC0.7990.7980.6670.6820.7500.788
CYP2D6 Substrate CarbonMangelsAUPRC0.6030.7060.5490.7000.6430.600
CYP2D6 VeithAUPRC0.6640.6810.5040.4350.6050.659
CYP3A4 Substrate CarbonMangelsAUROC0.6220.6900.6420.6660.6370.647
CYP3A4 VeithAUPRC0.8390.8540.7490.7500.8000.840
Carcinogens LaguninAccuracy0.8390.8570.8930.9110.8570.786
ClinToxAUROC0.8310.8880.7110.6370.8180.863
DILIAUROC0.8480.8870.6880.7660.7270.882
HIA HouAUROC0.9670.9880.8720.8970.9420.990
HIV*AUROC0.7340.7640.6120.5820.6860.732
HuRIAUPRC0.7790.7990.6280.6210.7050.753
MHC1 IEDB IMGT NielsenAUROC0.9270.9290.8750.8250.9130.907
MHC2 IEDB JensenAUROC0.8500.8510.7240.6830.7810.863
PAMPA NCATSAUROC0.6710.7050.7350.6640.6460.668
Pgp BroccatelliAUROC0.9110.9360.8990.9120.9090.939
SARSCOV2 3CLPro DiamondAUROC0.7080.7690.6990.7220.7550.712
SARSCoV2 Vitro TouretAUROC0.6680.5980.5030.5060.5120.601
SAbDab ChenAUPRC0.8070.7670.7020.7190.3900.473
Skin ReactionAUROC0.6480.7080.6380.5430.5640.615
Tox21AUROC0.8960.8930.8070.7970.8580.882
ToxCastAUROC0.7670.8000.7540.7340.7790.792
butkiewiczAUROC0.7720.8310.6290.6190.5740.566
hERGAUROC0.8810.8840.8300.8320.8790.909
hERG KarimAccuracy0.7940.7740.6570.6680.7240.745
herg centralAUROC0.8610.8960.8300.8070.8800.888
miRTarBaseAccuracy0.8290.8010.6790.6440.7650.799
phase1AUROC0.6350.6220.5760.5570.6240.667
phase2AUROC0.6680.6760.6380.6260.6390.676
phase3AUROC0.7290.7390.6830.6680.7010.728
weberAUROC0.7270.7490.6720.6430.7380.743
", + "image_path": "9c83b0f019f49296d6ba47ff826893c3814c3507cfe6ad3bd8589e6ca7128176.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "table_body" + }, + { + "bbox": [ + 68, + 633, + 279, + 643 + ], + "lines": [ + { + "bbox": [ + 68, + 633, + 279, + 643 + ], + "spans": [ + { + "bbox": [ + 68, + 633, + 279, + 643 + ], + "type": "text", + "content": "* To predict whether compounds have Anti-HIV properties." + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "table_footnote" + } + ], + "index": 1 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 528, + 742, + 541, + 751 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 528, + 742, + 541, + 751 + ], + "spans": [ + { + "bbox": [ + 528, + 742, + 541, + 751 + ], + "type": "text", + "content": "45" + } + ] + } + ], + "index": 3 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 44 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 63, + 224, + 548, + 602 + ], + "blocks": [ + { + "bbox": [ + 67, + 195, + 541, + 218 + ], + "lines": [ + { + "bbox": [ + 67, + 195, + 541, + 218 + ], + "spans": [ + { + "bbox": [ + 67, + 195, + 541, + 218 + ], + "type": "text", + "content": "Table S.16 | Model performance on regression and generation tasks. TxGemma-Predict, TxGemma-Chat, and Tx-LLM performances for each regression and generation task, along with the metric type." + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 63, + 224, + 548, + 602 + ], + "lines": [ + { + "bbox": [ + 63, + 224, + 548, + 602 + ], + "spans": [ + { + "bbox": [ + 63, + 224, + 548, + 602 + ], + "type": "table", + "html": "
Task NameMetricTxGemma-9B-PredictTxGemma-27B-PredictTxGemma-9B-ChatTxGemma-27B-ChatTx-LLM STx-LLM M
BindingDB PatentPCC0.5240.5380.4520.2200.4740.531
BindingDB ic50Spearman0.3980.4450.4120.3620.3260.311
BindingDB kdPCC0.3700.4560.1620.1590.3170.391
BindingDB kiPCC0.7370.6760.4480.2110.5650.726
Buchwald HartwigPCC0.9150.9100.2550.7570.6820.905
Caco2 WangMAE0.3730.4010.6430.3980.6210.432
Clearance Hepatocyte AZSpearman0.3380.2590.1970.1500.2560.385
Clearance Microsome AZSpearman0.6230.4620.3450.4200.3850.413
DAVISMSE0.5870.5550.6080.5610.5640.704
DisGeNETMAE0.0540.0540.0660.0640.0590.057
DrugComb BlissMAE4.3374.1564.5024.5114.4254.104
DrugComb CSSMAE16.48015.00016.38416.90014.74014.057
DrugComb HSAMAE4.3354.2094.4974.5204.3114.118
DrugComb LoeweMAE18.66517.33616.99416.91417.42817.381
DrugComb ZIPMAE3.9043.8074.1394.1414.0473.777
GDSC1PCC0.5450.8920.8610.8020.8760.887
GDSC2PCC0.5390.9120.8640.8230.8960.900
Half Life ObachSpearman0.4940.4580.3300.4140.5250.448
KIBAMSE0.5480.6330.7050.8520.7090.548
LD50 ZhuMAE0.6300.6280.7400.7050.8080.618
LeenaySpearman0.0670.2760.1280.0950.0480.083
Lipophilicity AstraZenecaMAE0.5650.5390.9850.8420.7790.587
OncoPolyPharmacologyPCC0.5180.5400.3590.1930.4180.552
PPBR AZMAE8.8899.02911.36710.89511.1389.108
Protein SAbDabMAE1.1061.2101.2681.1161.4321.268
Solubility AqSolDBMAE0.8680.8211.1591.1330.9310.987
TAPMAE4.4734.2804.8594.0835.0754.983
USPTOAccuracy0.0970.0840.0860.0910.2200.239
USPTO YieldsPCC0.0310.3950.0030.0260.0420.070
VDss LombardoSpearman0.6070.5600.3960.4070.4970.609
", + "image_path": "1b5ffa1d3820b6dfedd7efb2c60c35cd6a2033c4153cbd94273ca4ae0964a8d1.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "table_body" + } + ], + "index": 1 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 528, + 742, + 541, + 751 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 528, + 742, + 541, + 751 + ], + "spans": [ + { + "bbox": [ + 528, + 742, + 541, + 751 + ], + "type": "text", + "content": "46" + } + ] + } + ], + "index": 2 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 45 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 69, + 207, + 541, + 619 + ], + "blocks": [ + { + "bbox": [ + 67, + 167, + 541, + 202 + ], + "lines": [ + { + "bbox": [ + 67, + 167, + 541, + 202 + ], + "spans": [ + { + "bbox": [ + 67, + 167, + 541, + 202 + ], + "type": "text", + "content": "Table S.17 | Model performance on binary classification tasks for models trained only on datasets with commercial licenses. TxGemma-Predict and TxGemma-Chat performances for each binary classification task, along with the metric type." + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 69, + 207, + 541, + 619 + ], + "lines": [ + { + "bbox": [ + 69, + 207, + 541, + 619 + ], + "spans": [ + { + "bbox": [ + 69, + 207, + 541, + 619 + ], + "type": "table", + "html": "
Task NameMetricTxGemma-2B-PredictTxGemma-9B-PredictTxGemma-27B-PredictTxGemma-9B-ChatTxGemma-27B-Chat
AMESAUROC0.8120.8030.8260.7230.729
BBB MartinsAUROC0.8830.8490.8990.8320.848
Bioavailability MaAUROC0.6880.6880.7240.6660.625
CYP1A2 VeithAUPRC0.9110.9140.9160.8620.817
CYP2C19 VeithAUROC0.9050.8970.8970.8440.823
CYP2C9 Substrate CarbonMangelsAUPRC0.4170.3900.4600.4140.375
CYP2C9 VeithAUPRC0.7870.8000.7930.7000.685
CYP2D6 Substrate CarbonMangelsAUPRC0.6260.6970.7060.6530.704
CYP2D6 VeithAUPRC0.6660.6620.6770.5170.422
CYP3A4 Substrate CarbonMangelsAUROC0.6380.6800.6920.6440.653
CYP3A4 VeithAUPRC0.8420.8390.8520.7600.747
Carcinogens LaguninAccuracy0.9110.8570.8750.8930.929
ClinToxAUROC0.9170.8150.8840.7160.595
DILIAUROC0.8290.8230.9270.6750.797
HIA HouAUROC0.9840.9540.9900.9060.927
HIVAUROC0.7810.7300.7680.6410.589
HuRIAUPRC0.7350.7670.7970.6850.620
MHC1 IEDB IMGT NielsenAUROC0.9300.9290.9330.8870.826
MHC2 IEDB JensenAUROC0.8550.8520.8550.7330.682
PAMPA NCATSAUROC0.6940.6300.7240.6840.659
Pgp BroccatelliAUROC0.9220.9320.9410.8730.920
SARSCOV2 3CLPro DiamondAUROC0.7480.7990.6760.7160.712
SARSCoV2 Vitro TouretAUROC0.6590.6220.5970.5270.516
SAbDab ChenAUPRC0.7260.7450.7930.5230.731
Skin ReactionAUROC0.6910.6240.7330.6210.571
Tox21AUROC0.8970.8930.8900.8180.797
ToxCastAUROC0.7870.7660.7970.7540.735
butkiewiczAUROC0.8110.7750.8260.6810.606
hERGAUROC0.9020.8900.8940.8550.829
hERG KarimAccuracy0.7780.7960.7720.6490.673
herg centralAUROC0.8900.8600.8920.8420.805
miRTarBaseAccuracy0.8180.8340.8020.6720.649
weberAUROC0.7500.6970.7490.6920.645
", + "image_path": "011981604316ec859899c67e2c3b9723288769a28b7e640db6e16cd5b20b778e.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "table_body" + }, + { + "bbox": [ + 68, + 622, + 280, + 632 + ], + "lines": [ + { + "bbox": [ + 68, + 622, + 280, + 632 + ], + "spans": [ + { + "bbox": [ + 68, + 622, + 280, + 632 + ], + "type": "text", + "content": "* To predict whether compounds have Anti-HIV properties." + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "table_footnote" + } + ], + "index": 1 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 528, + 742, + 541, + 751 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 528, + 742, + 541, + 751 + ], + "spans": [ + { + "bbox": [ + 528, + 742, + 541, + 751 + ], + "type": "text", + "content": "47" + } + ] + } + ], + "index": 3 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 46 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 86, + 247, + 525, + 591 + ], + "blocks": [ + { + "bbox": [ + 67, + 206, + 541, + 242 + ], + "lines": [ + { + "bbox": [ + 67, + 206, + 541, + 242 + ], + "spans": [ + { + "bbox": [ + 67, + 206, + 541, + 242 + ], + "type": "text", + "content": "Table S.18 | Model performance on regression and generation tasks for models trained only on datasets with commercial licenses. TxGemma-Predict and TxGemma-Chat performances for each regression or generation task, along with the metric type." + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 86, + 247, + 525, + 591 + ], + "lines": [ + { + "bbox": [ + 86, + 247, + 525, + 591 + ], + "spans": [ + { + "bbox": [ + 86, + 247, + 525, + 591 + ], + "type": "table", + "html": "
Task NameMetricTxGemma-2B-PredictTxGemma-9B-PredictTxGemma-27B-PredictTxGemma-9B-ChatTxGemma-27B-Chat
BindingDB PatentPCC0.5560.3760.5370.4380.118
BindingDB ic50Spearman0.4250.3130.4650.4430.361
BindingDB kdPCC0.4900.3930.2890.2070.156
BindingDB kiPCC0.7280.7120.6700.3870.218
Buchwald HartwigPCC0.9200.9180.9030.5740.818
Caco2 WangMAE0.6190.4910.4790.5880.383
Clearance Hepatocyte AZSpearman0.2920.3780.3500.1660.190
Clearance Microsome AZSpearman0.5210.5240.5100.3940.395
DAVISMSE0.5760.5640.5750.5610.561
DrugComb BlissMAE4.0884.2864.1574.4544.519
DrugComb CSSMAE14.56815.37014.92515.96016.649
DrugComb HSAMAE4.0634.2824.1784.4864.529
DrugComb LoeweMAE17.31317.86217.32717.19016.873
DrugComb ZIPMAE3.7373.8483.8234.0934.132
Half Life ObachSpearman0.4230.3480.4910.2690.393
KIBAMSE0.5620.5250.5540.8300.858
LD50 ZhuMAE0.6980.7180.6770.7240.721
LeenaySpearman0.1140.0890.2590.0780.183
Lipophilicity AstraZenecaMAE0.5710.6670.6130.8340.837
OncoPolyPharmacologyPCC0.5560.4370.5310.3880.148
PPBR AZMAE8.8139.1778.79211.00411.025
Protein SAbDabMAE1.1171.0221.0721.3481.173
Solubility AqSolDBMAE0.9111.1850.8021.1601.135
TAPMAE5.4984.8394.0884.6114.444
USPTOAccuracy0.3160.0410.2810.1450.090
USPTO YieldsPCC0.4710.0020.3500.1140.002
VDss LombardoSpearman0.5940.5380.5910.4100.487
", + "image_path": "2f9c4d08248fb222ff8b63a338289fdf3f2fba41aaaec627497361f9dcd3fb03.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "table_body" + } + ], + "index": 1 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 528, + 742, + 541, + 751 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 528, + 742, + 541, + 751 + ], + "spans": [ + { + "bbox": [ + 528, + 742, + 541, + 751 + ], + "type": "text", + "content": "48" + } + ] + } + ], + "index": 2 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 47 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 123, + 72, + 484, + 622 + ], + "blocks": [ + { + "bbox": [ + 123, + 72, + 484, + 622 + ], + "lines": [ + { + "bbox": [ + 123, + 72, + 484, + 622 + ], + "spans": [ + { + "bbox": [ + 123, + 72, + 484, + 622 + ], + "type": "image", + "image_path": "4060b21bb67dbad81e197effeba434dd94828fd7e278fae975f4ae41d84efdb2.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 67, + 632, + 544, + 728 + ], + "lines": [ + { + "bbox": [ + 67, + 632, + 544, + 728 + ], + "spans": [ + { + "bbox": [ + 67, + 632, + 544, + 728 + ], + "type": "text", + "content": "Figure S.7 | Performance of TxGemma and Gemma-2 models on MMLU. Score of TxGemma-27B-Chat, TxGemma-27B-Predict, and Gemma-2-27B on each subject of the MMLU benchmark [50], a comprehensive suite of 57 diverse tasks spanning mathematics, history, computer science, law, etc. This benchmark evaluates knowledge, reasoning, and problem-solving abilities across a wide range of academic subjects, providing a robust measure of overall language understanding. 
TxGemma-27B-Chat (73.87%) performs slightly below Gemma-2-27B (75.38%) but significantly better than TxGemma-27B-Predict (53.60%), suggesting that domain fine-tuning purely on therapeutic data can cause loss of information learned during base model training, but simultaneously training with general instruction-tuning data can compensate for this." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 528, + 742, + 541, + 751 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 528, + 742, + 541, + 751 + ], + "spans": [ + { + "bbox": [ + 528, + 742, + 541, + 751 + ], + "type": "text", + "content": "49" + } + ] + } + ], + "index": 2 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 48 + }, + { + "para_blocks": [ + { + "bbox": [ + 99, + 232, + 213, + 240 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 99, + 232, + 213, + 240 + ], + "spans": [ + { + "bbox": [ + 99, + 232, + 213, + 240 + ], + "type": "text", + "content": "Tell me about the blood-brain barrier" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 171, + 263, + 206, + 272 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 171, + 263, + 206, + 272 + ], + "spans": [ + { + "bbox": [ + 171, + 263, + 206, + 272 + ], + "type": "text", + "content": "TxGemma" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 171, + 281, + 505, + 314 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 171, + 281, + 505, + 314 + ], + "spans": [ + { + "bbox": [ + 171, + 281, + 505, + 314 + ], + "type": "text", + "content": "The blood-brain barrier (BBB) is a highly selective semipermeable border that separates the circulating blood from the brain and extracellular fluid in the central nervous system (CNS). 
It plays a crucial role in maintaining a stable environment for the brain, protecting it from harmful substances and pathogens while allowing the passage of essential nutrients." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 171, + 316, + 289, + 324 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 171, + 316, + 289, + 324 + ], + "spans": [ + { + "bbox": [ + 171, + 316, + 289, + 324 + ], + "type": "text", + "content": "Here are some key features of the BBB:" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 171, + 327, + 505, + 350 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 171, + 327, + 505, + 350 + ], + "spans": [ + { + "bbox": [ + 171, + 327, + 505, + 350 + ], + "type": "text", + "content": "Cellular composition: The BBB is primarily formed by endothelial cells lining the capillaries in the brain. These cells are tightly connected by specialized tight junctions that restrict the movement of molecules between them." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 171, + 354, + 504, + 370 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 171, + 354, + 504, + 370 + ], + "spans": [ + { + "bbox": [ + 171, + 354, + 504, + 370 + ], + "type": "text", + "content": "Astrocytes and pericytes: These cells provide structural and functional support to the endothelial cells, contributing to the regulation of BBB permeability." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 171, + 373, + 504, + 388 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 171, + 373, + 504, + 388 + ], + "spans": [ + { + "bbox": [ + 171, + 373, + 504, + 388 + ], + "type": "text", + "content": "Selective transport mechanisms: The BBB utilizes various transport systems to regulate the passage of molecules across its barrier. 
These include:" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 171, + 391, + 385, + 399 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 171, + 391, + 385, + 399 + ], + "spans": [ + { + "bbox": [ + 171, + 391, + 385, + 399 + ], + "type": "text", + "content": "Passive diffusion: Small lipophilic molecules can readily cross the BBB." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 171, + 402, + 504, + 418 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 171, + 402, + 504, + 418 + ], + "spans": [ + { + "bbox": [ + 171, + 402, + 504, + 418 + ], + "type": "text", + "content": "Facilitated diffusion: Carrier-mediated transport systems assist the movement of specific molecules, such as glucose and amino acids." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 171, + 420, + 505, + 437 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 171, + 420, + 505, + 437 + ], + "spans": [ + { + "bbox": [ + 171, + 420, + 505, + 437 + ], + "type": "text", + "content": "Active transport: Energy-dependent mechanisms pump molecules against their concentration gradient, such as the removal of toxins from the brain." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 171, + 439, + 505, + 456 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 171, + 439, + 505, + 456 + ], + "spans": [ + { + "bbox": [ + 171, + 439, + 505, + 456 + ], + "type": "text", + "content": "P-glycoprotein (P-gp): This efflux pump actively removes a wide range of drugs from the brain, including many central nervous system-acting medications." 
+ } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 171, + 458, + 505, + 481 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 171, + 458, + 505, + 481 + ], + "spans": [ + { + "bbox": [ + 171, + 458, + 505, + 481 + ], + "type": "text", + "content": "*Impairment of the BBB can lead to various neurological disorders, including stroke, multiple sclerosis, and Alzheimer's disease. It can also allow the entry of harmful substances into the brain, such as pathogens and toxins." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 171, + 484, + 504, + 501 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 171, + 484, + 504, + 501 + ], + "spans": [ + { + "bbox": [ + 171, + 484, + 504, + 501 + ], + "type": "text", + "content": "*Researchers are actively developing strategies to overcome the BBB for drug delivery to the brain, including using nanocarriers and focused ultrasound." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 67, + 525, + 543, + 560 + ], + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 525, + 543, + 560 + ], + "spans": [ + { + "bbox": [ + 67, + 525, + 543, + 560 + ], + "type": "text", + "content": "Figure S.8 | Models can converse about general topics after fine-tuning only on the processed TDC data. Example of a dialogue with TxGemma-27B-Predict. When asked a question that is not in a processed TDC data format, the model is able to respond coherently." 
+ } + ] + } + ], + "index": 13, + "type": "text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 528, + 742, + 541, + 751 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 528, + 742, + 541, + 751 + ], + "spans": [ + { + "bbox": [ + 528, + 742, + 541, + 751 + ], + "type": "text", + "content": "50" + } + ] + } + ], + "index": 14 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 49 + }, + { + "para_blocks": [ + { + "bbox": [ + 222, + 214, + 384, + 223 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 222, + 214, + 384, + 223 + ], + "spans": [ + { + "bbox": [ + 222, + 214, + 384, + 223 + ], + "type": "text", + "content": "Example of TxGemma-27B-Predict Failure in Conversation" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 138, + 238, + 162, + 245 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 238, + 162, + 245 + ], + "spans": [ + { + "bbox": [ + 138, + 238, + 162, + 245 + ], + "type": "text", + "content": "Prompt" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 138, + 247, + 319, + 255 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 247, + 319, + 255 + ], + "spans": [ + { + "bbox": [ + 138, + 247, + 319, + 255 + ], + "type": "text", + "content": "Instructions: Answer the following question about drug properties." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 138, + 258, + 459, + 279 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 258, + 459, + 279 + ], + "spans": [ + { + "bbox": [ + 138, + 258, + 459, + 279 + ], + "type": "text", + "content": "Context: As a membrane separating circulating blood and brain extracellular fluid, the blood-brain barrier (BBB) is the protection layer that blocks most foreign drugs. Thus the ability of a drug to penetrate the barrier to deliver to the site of action forms a crucial challenge in development of drugs for central nervous system." 
+ } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 139, + 281, + 291, + 289 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 139, + 281, + 291, + 289 + ], + "spans": [ + { + "bbox": [ + 139, + 281, + 291, + 289 + ], + "type": "text", + "content": "Question: Given a drug SMILES string, predict whether it" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 139, + 291, + 264, + 298 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 139, + 291, + 264, + 298 + ], + "spans": [ + { + "bbox": [ + 139, + 291, + 264, + 298 + ], + "type": "text", + "content": "(A) does not cross the BBB (B) crosses the BBB" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 139, + 300, + 378, + 307 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 139, + 300, + 378, + 307 + ], + "spans": [ + { + "bbox": [ + 139, + 300, + 378, + 307 + ], + "type": "text", + "content": "Drug SMILES: C1CN(CCC1(C=O)NC(CCO)C2=CC=C(C=C2)Cl)N)C3=NC=NC4=C3C=CN4" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 139, + 309, + 164, + 316 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 139, + 309, + 164, + 316 + ], + "spans": [ + { + "bbox": [ + 139, + 309, + 164, + 316 + ], + "type": "text", + "content": "Answer:" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 139, + 331, + 224, + 338 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 139, + 331, + 224, + 338 + ], + "spans": [ + { + "bbox": [ + 139, + 331, + 224, + 338 + ], + "type": "text", + "content": "Tx-Gemma-Predict Response" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 139, + 342, + 172, + 349 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 139, + 342, + 172, + 349 + ], + "spans": [ + { + "bbox": [ + 139, + 342, + 172, + 349 + ], + "type": "text", + "content": "Answer: (B)" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 139, + 363, + 162, + 370 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 139, + 363, 
+ 162, + 370 + ], + "spans": [ + { + "bbox": [ + 139, + 363, + 162, + 370 + ], + "type": "text", + "content": "Prompt" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 139, + 374, + 383, + 381 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 139, + 374, + 383, + 381 + ], + "spans": [ + { + "bbox": [ + 139, + 374, + 383, + 381 + ], + "type": "text", + "content": "What properties are useful for crossing the blood-brain barrier? Don't answer with (A) or (B)" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 139, + 395, + 224, + 402 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 139, + 395, + 224, + 402 + ], + "spans": [ + { + "bbox": [ + 139, + 395, + 224, + 402 + ], + "type": "text", + "content": "Tx-Gemma-Predict Response" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 139, + 405, + 457, + 421 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 139, + 405, + 457, + 421 + ], + "spans": [ + { + "bbox": [ + 139, + 405, + 457, + 421 + ], + "type": "text", + "content": "Answer: Small molecular size, high lipophilicity, lack of ionization, and absence of extensive protein binding are generally favorable for BBB penetration." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 139, + 434, + 162, + 441 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 139, + 434, + 162, + 441 + ], + "spans": [ + { + "bbox": [ + 139, + 434, + 162, + 441 + ], + "type": "text", + "content": "Prompt" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 139, + 444, + 342, + 451 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 139, + 444, + 342, + 451 + ], + "spans": [ + { + "bbox": [ + 139, + 444, + 342, + 451 + ], + "type": "text", + "content": "Does the drug I gave you have these properties? 
Don't answer with (A) or (B)" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 139, + 464, + 224, + 472 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 139, + 464, + 224, + 472 + ], + "spans": [ + { + "bbox": [ + 139, + 464, + 224, + 472 + ], + "type": "text", + "content": "Tx-Gemma-Predict Response" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 139, + 475, + 172, + 483 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 139, + 475, + 172, + 483 + ], + "spans": [ + { + "bbox": [ + 139, + 475, + 172, + 483 + ], + "type": "text", + "content": "Answer: (B)" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 67, + 505, + 542, + 577 + ], + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 505, + 542, + 577 + ], + "spans": [ + { + "bbox": [ + 67, + 505, + 542, + 577 + ], + "type": "text", + "content": "Figure S.9 | Predictive model struggles to hold a conversation about processed TDC data prompts after fine-tuning only on this data. Example of a multi-turn dialogue with TxGemma-27B-Predict. The initial turn is a question in the processed TDC data format. After the initial turn, the model is prompted to describe some properties associated with the problem of crossing the blood-brain barrier. An additional instruction to avoid answering with (A) or (B) is necessary. The model is able to provide some answers in natural language, but another turn leads to an inappropriate multiple choice answer." 
+ } + ] + } + ], + "index": 18, + "type": "text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 528, + 742, + 540, + 751 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 528, + 742, + 540, + 751 + ], + "spans": [ + { + "bbox": [ + 528, + 742, + 540, + 751 + ], + "type": "text", + "content": "51" + } + ] + } + ], + "index": 19 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 50 + }, + { + "para_blocks": [ + { + "bbox": [ + 90, + 77, + 441, + 111 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 90, + 77, + 441, + 111 + ], + "spans": [ + { + "bbox": [ + 90, + 77, + 441, + 111 + ], + "type": "text", + "content": "Provide five steps of reasoning to think through the problem and then provide an answer. Let's think step by step. Consider whether the drug would pass phase 1 trials, not whether the drug would be fully approved. You must also discuss the specific drug and disease as part of your reasoning that leads to your answer. This should be emphasized." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 90, + 115, + 294, + 125 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 90, + 115, + 294, + 125 + ], + "spans": [ + { + "bbox": [ + 90, + 115, + 294, + 125 + ], + "type": "text", + "content": "Instructions: Answer the following question about clinical trials." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 89, + 129, + 441, + 180 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 89, + 129, + 441, + 180 + ], + "spans": [ + { + "bbox": [ + 89, + 129, + 441, + 180 + ], + "type": "text", + "content": "Context: Clinical trial is the most time and cost-consuming step in the drug discovery process. Phase 1 clinical trials test the safety and basic properties of a new drug or treatment in a small group of people for the first time. Optimizing and designing trials with machine learning could drastically lead to the speedup of delivery of life-saving therapeutics to patients. 
Clinical trial outcome prediction is a machine learning task that aims to forecast the outcome of clinical trials, such as the approval rate of a drug or treatment. It utilizes various clinical trial features, including the drug's molecular structure and patient disease." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 90, + 183, + 335, + 201 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 90, + 183, + 335, + 201 + ], + "spans": [ + { + "bbox": [ + 90, + 183, + 335, + 201 + ], + "type": "text", + "content": "Question: Given a drug SMILES string and disease, predict if the phase 1 trial (A) would not be approved (B) would be approved" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 90, + 206, + 370, + 216 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 90, + 206, + 370, + 216 + ], + "spans": [ + { + "bbox": [ + 90, + 206, + 370, + 216 + ], + "type": "text", + "content": "Drug SMILES: CC1=C(SC(=N1)NC(=O)N2CCCCC2(=O)N)C3=CC(=NC=C3)C(C)(C)(C(F)(F)F" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 89, + 220, + 441, + 246 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 89, + 220, + 441, + 246 + ], + "spans": [ + { + "bbox": [ + 89, + 220, + 441, + 246 + ], + "type": "text", + "content": "Disease: Malignant neoplasm of ovary: Ovarian cancer is a disease that affects women. In this form of cancer, certain cells in the ovary become abnormal and multiply uncontrollably to form a tumor. The ovaries are the female reproductive organs in which egg cells are produced. In about 90 percent of cases, ..." 
+ } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 90, + 250, + 292, + 259 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 90, + 250, + 292, + 259 + ], + "spans": [ + { + "bbox": [ + 90, + 250, + 292, + 259 + ], + "type": "text", + "content": "Remember: (A) would not be approved (B) would be approved" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 91, + 266, + 130, + 276 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 91, + 266, + 130, + 276 + ], + "spans": [ + { + "bbox": [ + 91, + 266, + 130, + 276 + ], + "type": "text", + "content": "Reasoning:" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 162, + 295, + 202, + 305 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 162, + 295, + 202, + 305 + ], + "spans": [ + { + "bbox": [ + 162, + 295, + 202, + 305 + ], + "type": "text", + "content": "TxGemma" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 164, + 315, + 432, + 324 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 164, + 315, + 432, + 324 + ], + "spans": [ + { + "bbox": [ + 164, + 315, + 432, + 324 + ], + "type": "text", + "content": "Here's a step-by-step reasoning process to predict the outcome of the Phase 1 trial:" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 164, + 338, + 304, + 347 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 164, + 338, + 304, + 347 + ], + "spans": [ + { + "bbox": [ + 164, + 338, + 304, + 347 + ], + "type": "text", + "content": "1. Analyze the Drug's Chemical Structure:" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 164, + 350, + 515, + 367 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 164, + 350, + 515, + 367 + ], + "spans": [ + { + "bbox": [ + 164, + 350, + 515, + 367 + ], + "type": "text", + "content": "Observation: The SMILES string indicates a complex molecule with multiple aromatic rings, fluorine atoms, and amide groups." 
+ } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 164, + 369, + 515, + 386 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 164, + 369, + 515, + 386 + ], + "spans": [ + { + "bbox": [ + 164, + 369, + 515, + 386 + ], + "type": "text", + "content": "Reasoning: Such structures often point to potential biological activity, but also raise concerns about potential toxicity and side effects." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 164, + 389, + 247, + 397 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 164, + 389, + 247, + 397 + ], + "spans": [ + { + "bbox": [ + 164, + 389, + 247, + 397 + ], + "type": "text", + "content": "2. Consider the Disease:" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 164, + 400, + 488, + 410 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 164, + 400, + 488, + 410 + ], + "spans": [ + { + "bbox": [ + 164, + 400, + 488, + 410 + ], + "type": "text", + "content": "Observation: Ovarian cancer is a serious and often aggressive disease with limited treatment options." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 164, + 412, + 515, + 430 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 164, + 412, + 515, + 430 + ], + "spans": [ + { + "bbox": [ + 164, + 412, + 515, + 430 + ], + "type": "text", + "content": "Reasoning: The unmet medical need is high, meaning there's a greater chance for a new drug to be approved if it shows any promise of efficacy." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 164, + 444, + 264, + 453 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 164, + 444, + 264, + 453 + ], + "spans": [ + { + "bbox": [ + 164, + 444, + 264, + 453 + ], + "type": "text", + "content": "3. 
Evaluate Potential Toxicity:" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 164, + 455, + 486, + 464 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 164, + 455, + 486, + 464 + ], + "spans": [ + { + "bbox": [ + 164, + 455, + 486, + 464 + ], + "type": "text", + "content": "Observation: We don't have specific information about the drug's toxicity profile from the given data." + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 164, + 466, + 514, + 484 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 164, + 466, + 514, + 484 + ], + "spans": [ + { + "bbox": [ + 164, + 466, + 514, + 484 + ], + "type": "text", + "content": "Reasoning: Phase 1 trials primarily focus on safety. The drug's chemical structure suggests potential for toxicity, which would need to be carefully assessed in the trial." + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 164, + 498, + 332, + 506 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 164, + 498, + 332, + 506 + ], + "spans": [ + { + "bbox": [ + 164, + 498, + 332, + 506 + ], + "type": "text", + "content": "4. Assess the Likelihood of Demonstrating Safety:" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 164, + 510, + 515, + 526 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 164, + 510, + 515, + 526 + ], + "spans": [ + { + "bbox": [ + 164, + 510, + 515, + 526 + ], + "type": "text", + "content": "Observation: While the drug's structure raises concerns, there's no information suggesting inherent, severe toxicity that would preclude Phase 1 testing." + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 164, + 529, + 515, + 555 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 164, + 529, + 515, + 555 + ], + "spans": [ + { + "bbox": [ + 164, + 529, + 515, + 555 + ], + "type": "text", + "content": "Reasoning: Most drugs with novel structures undergo preclinical testing before reaching Phase 1. 
If the drug passed those tests, it likely has a reasonable chance of demonstrating acceptable safety in a small group of patients." + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 164, + 568, + 249, + 578 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 164, + 568, + 249, + 578 + ], + "spans": [ + { + "bbox": [ + 164, + 568, + 249, + 578 + ], + "type": "text", + "content": "5. Weighing the Factors:" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 164, + 580, + 510, + 590 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 164, + 580, + 510, + 590 + ], + "spans": [ + { + "bbox": [ + 164, + 580, + 510, + 590 + ], + "type": "text", + "content": "Observation: We have a potentially effective drug for a serious disease, but with potential toxicity concerns." + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 164, + 592, + 515, + 617 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 164, + 592, + 515, + 617 + ], + "spans": [ + { + "bbox": [ + 164, + 592, + 515, + 617 + ], + "type": "text", + "content": "Reasoning: Phase 1 trials are designed to assess safety and dosage. Even if the drug has some side effects, it's likely to be approved to proceed to further testing if it shows any signs of efficacy and the benefits outweigh the risks in the context of a serious disease." + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 164, + 624, + 268, + 634 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 164, + 624, + 268, + 634 + ], + "spans": [ + { + "bbox": [ + 164, + 624, + 268, + 634 + ], + "type": "text", + "content": "Answer: (B) would be approved" + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 67, + 664, + 542, + 736 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 664, + 542, + 736 + ], + "spans": [ + { + "bbox": [ + 67, + 664, + 542, + 736 + ], + "type": "text", + "content": "Figure S.10 | Enabling reasoning with TxGemma-Chat on challenging tasks. 
Example of prompting TxGemma-27B-Chat to explain its prediction for predicting clinical trial approval, a relatively unintuitive task. A prefix is added before the prompt to provide instructions for reasoning, and a reminder is added at the end so the model correctly specifies the option corresponding to its desired answer. Lastly, the \"Answer\" text is changed to \"Reasoning:\" to enable reasoning steps. The reasoning provided by the model is not comprehensive but can provide useful insights into the drug-disease interaction." + } + ] + } + ], + "index": 26 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 528, + 742, + 541, + 751 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 528, + 742, + 541, + 751 + ], + "spans": [ + { + "bbox": [ + 528, + 742, + 541, + 751 + ], + "type": "text", + "content": "52" + } + ] + } + ], + "index": 27 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 51 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 165, + 84, + 441, + 223 + ], + "blocks": [ + { + "bbox": [ + 165, + 84, + 441, + 223 + ], + "lines": [ + { + "bbox": [ + 165, + 84, + 441, + 223 + ], + "spans": [ + { + "bbox": [ + 165, + 84, + 441, + 223 + ], + "type": "image", + "image_path": "50aa41bd72408b2d1964da82f8d4a59c63c3870b25b0effe6eb0301bce2fba97.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 67, + 240, + 542, + 275 + ], + "lines": [ + { + "bbox": [ + 67, + 240, + 542, + 275 + ], + "spans": [ + { + "bbox": [ + 67, + 240, + 542, + 275 + ], + "type": "text", + "content": "Figure S.11 | Inference speed of TxGemma models. The number of examples inferred per day at different model sizes, normalized by the number of TPUv5e chips used for serving. The PPBR AZ task was used for the benchmarking due to its reasonable size." 
+ } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "type": "image", + "bbox": [ + 192, + 316, + 417, + 495 + ], + "blocks": [ + { + "bbox": [ + 192, + 316, + 417, + 495 + ], + "lines": [ + { + "bbox": [ + 192, + 316, + 417, + 495 + ], + "spans": [ + { + "bbox": [ + 192, + 316, + 417, + 495 + ], + "type": "image", + "image_path": "b821a33a107593bfb04127fe8375bcedb99541d1716ad89c30f99bc5c74a7f45.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 190, + 503, + 416, + 633 + ], + "blocks": [ + { + "bbox": [ + 190, + 503, + 416, + 633 + ], + "lines": [ + { + "bbox": [ + 190, + 503, + 416, + 633 + ], + "spans": [ + { + "bbox": [ + 190, + 503, + 416, + 633 + ], + "type": "image", + "image_path": "595c78f89cc375092a21ef0c686aac0157bd0fd7a3be1241a02d3da171359946.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 67, + 643, + 542, + 700 + ], + "lines": [ + { + "bbox": [ + 67, + 643, + 542, + 700 + ], + "spans": [ + { + "bbox": [ + 67, + 643, + 542, + 700 + ], + "type": "text", + "content": "Figure S.12 | Contamination analysis. (top) Out of 66 tasks, " + }, + { + "bbox": [ + 67, + 643, + 542, + 700 + ], + "type": "inline_equation", + "content": "23\\%" + }, + { + "bbox": [ + 67, + 643, + 542, + 700 + ], + "type": "text", + "content": " had some datapoints in the test set that were found in the Gemma-2 pretraining data, while " + }, + { + "bbox": [ + 67, + 643, + 542, + 700 + ], + "type": "inline_equation", + "content": "77\\%" + }, + { + "bbox": [ + 67, + 643, + 542, + 700 + ], + "type": "text", + "content": " did not. For tasks that had some contaminated datapoints, we plot the percent of the test set that was contaminated. (bottom) Distributions of cosine similarities between SMILES string embeddings and molecular name embeddings. 
Decoy name embeddings indicate a random different molecule name." + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_caption" + } + ], + "index": 3 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 528, + 742, + 541, + 751 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 528, + 742, + 541, + 751 + ], + "spans": [ + { + "bbox": [ + 528, + 742, + 541, + 751 + ], + "type": "text", + "content": "53" + } + ] + } + ], + "index": 5 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 52 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 107, + 109, + 339, + 285 + ], + "blocks": [ + { + "bbox": [ + 107, + 109, + 339, + 285 + ], + "lines": [ + { + "bbox": [ + 107, + 109, + 339, + 285 + ], + "spans": [ + { + "bbox": [ + 107, + 109, + 339, + 285 + ], + "type": "image", + "image_path": "a06c6ddce36e0a8cc6fde75d0a6734d0239ad730b94f9224094944eb29afbd53.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 67, + 297, + 544, + 357 + ], + "lines": [ + { + "bbox": [ + 67, + 297, + 544, + 357 + ], + "spans": [ + { + "bbox": [ + 67, + 297, + 544, + 357 + ], + "type": "text", + "content": "Figure S.13 | Model performance after filtering contaminated datapoints. Performance of TxGemma-27B-Predict on both original unfiltered test sets and filtered test sets in which contaminated datapoints were removed. (left) For these tasks, higher values correspond to better models, and the metrics are defined in Tables S.13 and S.14. Error bars are bootstrapped standard errors. (right) For these tasks, lower values correspond to better models, and the metrics (either MAE or MSE) are defined in Tables S.13 and S.14. Error bars are bootstrapped standard errors." 
+ } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "type": "image", + "bbox": [ + 361, + 110, + 503, + 274 + ], + "blocks": [ + { + "bbox": [ + 361, + 110, + 503, + 274 + ], + "lines": [ + { + "bbox": [ + 361, + 110, + 503, + 274 + ], + "spans": [ + { + "bbox": [ + 361, + 110, + 503, + 274 + ], + "type": "image", + "image_path": "5a77174bc677da9772e91115ac640fda919da96c5104c827eee2013682a15b8a.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + } + ], + "index": 1 + }, + { + "type": "image", + "bbox": [ + 107, + 445, + 309, + 608 + ], + "blocks": [ + { + "bbox": [ + 107, + 445, + 309, + 608 + ], + "lines": [ + { + "bbox": [ + 107, + 445, + 309, + 608 + ], + "spans": [ + { + "bbox": [ + 107, + 445, + 309, + 608 + ], + "type": "image", + "image_path": "8831205666fac2be3162fb8e88173d029338f92c0c4193c627a45407203b69d9.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 67, + 617, + 544, + 678 + ], + "lines": [ + { + "bbox": [ + 67, + 617, + 544, + 678 + ], + "spans": [ + { + "bbox": [ + 67, + 617, + 544, + 678 + ], + "type": "text", + "content": "Figure S.14 | Breakdown of tool-usage frequency for Chemical Preference dataset and HLE dataset. Agentic-Tx adapts its tool usage to reason effectively about different tasks. For Chemical Preference, which requires evaluating drug candidates, the system correctly invokes tools for molecular characterization and safety assessment, such as SMILES description and toxicity prediction. For the Bio+Med task, focused on complex biomedical questions, the agent prioritizes PubMed and Wikipedia, demonstrating reliance on broad knowledge retrieval and synthesis." 
+ } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_caption" + } + ], + "index": 3 + }, + { + "type": "image", + "bbox": [ + 312, + 445, + 503, + 608 + ], + "blocks": [ + { + "bbox": [ + 312, + 445, + 503, + 608 + ], + "lines": [ + { + "bbox": [ + 312, + 445, + 503, + 608 + ], + "spans": [ + { + "bbox": [ + 312, + 445, + 503, + 608 + ], + "type": "image", + "image_path": "bb7daa0ab3745fb49912d1219615736ceb002d7d69fafd8c7a73819734f71a09.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_body" + } + ], + "index": 4 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 528, + 742, + 541, + 751 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 528, + 742, + 541, + 751 + ], + "spans": [ + { + "bbox": [ + 528, + 742, + 541, + 751 + ], + "type": "text", + "content": "54" + } + ] + } + ], + "index": 6 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 53 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 72, + 73, + 541, + 203 + ], + "blocks": [ + { + "bbox": [ + 72, + 73, + 541, + 203 + ], + "lines": [ + { + "bbox": [ + 72, + 73, + 541, + 203 + ], + "spans": [ + { + "bbox": [ + 72, + 73, + 541, + 203 + ], + "type": "image", + "image_path": "0dbc4689ed18c4fc12b7ae53b6420d0f6eb26f24706c74f033eb5e0918cf8339.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 67, + 220, + 544, + 282 + ], + "lines": [ + { + "bbox": [ + 67, + 220, + 544, + 282 + ], + "spans": [ + { + "bbox": [ + 67, + 220, + 544, + 282 + ], + "type": "text", + "content": "Figure S.15 | Breakdown of tool-usage per question in chemical preference dataset. Marker size represents usage count and corresponds to the number of uses per each tool; blue indicates accuracy increase, light red indicates decrease associated with each tool per question. We observe questions involve up to 8 tool calls. High usage of SMILES description and toxicity prediction correlates with improved performance. 
This demonstrates Agentic-Tx's adaptive tool selection to meet task requirements and improved performance." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "type": "image", + "bbox": [ + 72, + 300, + 223, + 430 + ], + "blocks": [ + { + "bbox": [ + 72, + 300, + 223, + 430 + ], + "lines": [ + { + "bbox": [ + 72, + 300, + 223, + 430 + ], + "spans": [ + { + "bbox": [ + 72, + 300, + 223, + 430 + ], + "type": "image", + "image_path": "a9c5a0cec4eba54bfa2c62a6203b620f76c3bf7a0500cf31babec23662e84c49.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 67, + 445, + 544, + 517 + ], + "lines": [ + { + "bbox": [ + 67, + 445, + 544, + 517 + ], + "spans": [ + { + "bbox": [ + 67, + 445, + 544, + 517 + ], + "type": "text", + "content": "Figure S.16 | Ability to combine SMILES and text is independent of model size. Median relative change of TxGemma-27B-Predict, TxGemma-9B-Predict and TxGemma-2B-Predict performance from SOTA for tasks grouped by feature type. The signs were reversed for MAE and MSE metrics because lower MAE and MSE values correspond to better performances. The number of tasks in each feature type is displayed over each bar. In all models, over " + }, + { + "bbox": [ + 67, + 445, + 544, + 517 + ], + "type": "inline_equation", + "content": "90\\%" + }, + { + "bbox": [ + 67, + 445, + 544, + 517 + ], + "type": "text", + "content": " of tasks had a median relative performance change greater than -0.2, and SMILES + Text consistently outperformed SOTA." 
+ } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_caption" + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 230, + 300, + 380, + 429 + ], + "blocks": [ + { + "bbox": [ + 230, + 300, + 380, + 429 + ], + "lines": [ + { + "bbox": [ + 230, + 300, + 380, + 429 + ], + "spans": [ + { + "bbox": [ + 230, + 300, + 380, + 429 + ], + "type": "image", + "image_path": "98955b38edb8949ab00f726cf43f2ccf404666fb5bdd25f71cd16900b64edab5.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + } + ], + "index": 3 + }, + { + "type": "image", + "bbox": [ + 389, + 300, + 539, + 430 + ], + "blocks": [ + { + "bbox": [ + 389, + 300, + 539, + 430 + ], + "lines": [ + { + "bbox": [ + 389, + 300, + 539, + 430 + ], + "spans": [ + { + "bbox": [ + 389, + 300, + 539, + 430 + ], + "type": "image", + "image_path": "abcd5f2594648ea69cc611d92f1720d998830411dd4e40aca9dce191977179fb.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_body" + } + ], + "index": 4 + }, + { + "type": "image", + "bbox": [ + 72, + 536, + 282, + 663 + ], + "blocks": [ + { + "bbox": [ + 72, + 536, + 282, + 663 + ], + "lines": [ + { + "bbox": [ + 72, + 536, + 282, + 663 + ], + "spans": [ + { + "bbox": [ + 72, + 536, + 282, + 663 + ], + "type": "image", + "image_path": "53ef111b2c7457fddcdcb839e97bb4a0565136bb40ee6962ec4d103f9efc3a79.jpg" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 67, + 677, + 544, + 713 + ], + "lines": [ + { + "bbox": [ + 67, + 677, + 544, + 713 + ], + "spans": [ + { + "bbox": [ + 67, + 677, + 544, + 713 + ], + "type": "text", + "content": "Figure S.17 | Ablations of model sizes and model adaptations. (left) Relative performance changes for pairwise comparisons of TxGemma-Predict models (TxGemma-2B-Predict, TxGemma-9B-Predict, TxGemma-27B-Predict). (right) Relative performance changes of TxGemma models compared to their respective base models." 
+ } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_caption" + } + ], + "index": 6 + }, + { + "type": "image", + "bbox": [ + 328, + 536, + 539, + 662 + ], + "blocks": [ + { + "bbox": [ + 328, + 536, + 539, + 662 + ], + "lines": [ + { + "bbox": [ + 328, + 536, + 539, + 662 + ], + "spans": [ + { + "bbox": [ + 328, + 536, + 539, + 662 + ], + "type": "image", + "image_path": "97a2ed49d9a6a34eb3566d17f60c2015ee4e18897569ccfbdda77a9076bdca00.jpg" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_body" + } + ], + "index": 7 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 528, + 742, + 541, + 751 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 528, + 742, + 541, + 751 + ], + "spans": [ + { + "bbox": [ + 528, + 742, + 541, + 751 + ], + "type": "text", + "content": "55" + } + ] + } + ], + "index": 9 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 54 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 212, + 304, + 396, + 430 + ], + "blocks": [ + { + "bbox": [ + 212, + 304, + 396, + 430 + ], + "lines": [ + { + "bbox": [ + 212, + 304, + 396, + 430 + ], + "spans": [ + { + "bbox": [ + 212, + 304, + 396, + 430 + ], + "type": "image", + "image_path": "5440136090b3e3dd0547d3116cd363c58ded13e0ceb7ef554d37f45cd179852c.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 67, + 445, + 544, + 481 + ], + "lines": [ + { + "bbox": [ + 67, + 445, + 544, + 481 + ], + "spans": [ + { + "bbox": [ + 67, + 445, + 544, + 481 + ], + "type": "text", + "content": "Figure S.18 | TxGemma predictions show correlations between toxicity and clinical trial approval. Spearman correlation coefficients between toxicity predictions (measured by AMES, DILI, and hERG central) and clinical trial predictions (measured by Phase1, Phase2, and Phase3) on a set of PubChem molecules." 
+ } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 528, + 742, + 541, + 751 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 528, + 742, + 541, + 751 + ], + "spans": [ + { + "bbox": [ + 528, + 742, + 541, + 751 + ], + "type": "text", + "content": "56" + } + ] + } + ], + "index": 2 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 55 + }, + { + "para_blocks": [ + { + "bbox": [ + 74, + 72, + 140, + 83 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 74, + 72, + 140, + 83 + ], + "spans": [ + { + "bbox": [ + 74, + 72, + 140, + 83 + ], + "type": "text", + "content": "References" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 69, + 95, + 543, + 708 + ], + "type": "list", + "angle": 0, + "index": 29, + "blocks": [ + { + "bbox": [ + 72, + 95, + 542, + 114 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 72, + 95, + 542, + 114 + ], + "spans": [ + { + "bbox": [ + 72, + 95, + 542, + 114 + ], + "type": "text", + "content": "1. Chen, J., Hu, Y., Wang, Y., Lu, Y., Cao, X., Lin, M., Xu, H., Wu, J., Xiao, C., Sun, J., et al. TrialBench: Multi-modal artificial intelligence-ready clinical trial datasets. arXiv preprint arXiv:2407.00631 (2024)." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 72, + 116, + 543, + 142 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 72, + 116, + 543, + 142 + ], + "spans": [ + { + "bbox": [ + 72, + 116, + 543, + 142 + ], + "type": "text", + "content": "2. Kuo, K.-T., Mao, T.-L., Jones, S., Veras, E., Ayhan, A., Wang, T.-L., Glas, R., Slamon, D., Velculescu, V. E., Kuman, R. J., et al. Frequent activating mutations of PIK3CA in ovarian clear cell carcinoma. The American journal of pathology 174, 1597-1601 (2009)." 
+ } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 72, + 144, + 542, + 163 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 72, + 144, + 542, + 163 + ], + "spans": [ + { + "bbox": [ + 72, + 144, + 542, + 163 + ], + "type": "text", + "content": "3. Leontiadou, H., Galdadas, I., Athanasiou, C. & Cournia, Z. Insights into the mechanism of the PIK3CA E545K activating mutation using MD simulations. Scientific reports 8, 15544 (2018)." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 72, + 165, + 542, + 183 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 72, + 165, + 542, + 183 + ], + "spans": [ + { + "bbox": [ + 72, + 165, + 542, + 183 + ], + "type": "text", + "content": "4. Chen, H., Si, Y., Wen, J., Hu, C., Xia, E., Wang, Y. & Wang, O. P110α inhibitor alpelisib exhibits a synergistic effect with pyrotinib and reverses pyrotinib resistant in HER2+ breast cancer. Neoplasia 43, 100913 (2023)." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 72, + 185, + 542, + 212 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 72, + 185, + 542, + 212 + ], + "spans": [ + { + "bbox": [ + 72, + 185, + 542, + 212 + ], + "type": "text", + "content": "5. Fritsch, C., Huang, A., Chatenay-Rivauday, C., Schnell, C., Reddy, A., Liu, M., Kauffmann, A., Guthy, D., Erdmann, D., De Pover, A., et al. Characterization of the novel and specific PI3Kα inhibitor NVP-BYL719 and development of the patient stratification strategy for clinical trials. Molecular cancer therapeutics 13, 1117-1129 (2014)." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 72, + 213, + 542, + 239 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 72, + 213, + 542, + 239 + ], + "spans": [ + { + "bbox": [ + 72, + 213, + 542, + 239 + ], + "type": "text", + "content": "6. Narayan, P., Prowell, T. M., Gao, J. J., Fernandes, L. L., Li, E., Jiang, X., Qiu, J., Fan, J., Song, P., Yu, J., et al. 
FDA approval summary: alpelisib plus fulvestrant for patients with HR-positive, HER2-negative, PIK3CA-mutated, advanced or metastatic breast cancer. Clinical Cancer Research 27, 1842-1849 (2021)." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 72, + 241, + 542, + 269 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 72, + 241, + 542, + 269 + ], + "spans": [ + { + "bbox": [ + 72, + 241, + 542, + 269 + ], + "type": "text", + "content": "7. Passarelli, A., Carbone, V., Pignata, S., Mazzeo, R., Lorusso, D., Scambia, G., Canova, S., Di Palma, T., Tasca, G., Mantiero, M., et al. Alpelisib for PIK3CA-mutated advanced gynecological cancers: first clues of clinical activity. *Gynecologic Oncology* 183, 61-67 (2024)." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 72, + 270, + 542, + 289 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 72, + 270, + 542, + 289 + ], + "spans": [ + { + "bbox": [ + 72, + 270, + 542, + 289 + ], + "type": "text", + "content": "8. Thibault, B., Thole, A., D'Angelo, R., Basset, C. & Guillermet-Guibert, J. PI3Kα-specific inhibitor BYL-719 synergizes with cisplatin in vitro in PIK3CA-mutated ovarian cancer cells. Scientific Reports 15, 6265 (2025)." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 72, + 291, + 542, + 317 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 72, + 291, + 542, + 317 + ], + "spans": [ + { + "bbox": [ + 72, + 291, + 542, + 317 + ], + "type": "text", + "content": "9. Hu, X., Xia, M., Wang, J., Yu, H., Chai, J., Zhang, Z., Sun, Y., Su, J. & Sun, L. Dual PI3K/mTOR inhibitor PKI-402 suppresses the growth of ovarian cancer cells by degradation of Mcl-1 through autophagy. Biomedicine & Pharmacotherapy 129, 110397 (2020)." 
+ } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 69, + 319, + 542, + 338 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 319, + 542, + 338 + ], + "spans": [ + { + "bbox": [ + 69, + 319, + 542, + 338 + ], + "type": "text", + "content": "10. Turon, G., Hlozek, J., Woodland, J. G., Kumar, A., Chibale, K. & Duran-Frigola, M. First fully-automated AI/ML virtual screening cascade implemented at a drug discovery centre in Africa. Nature Communications 14, 5736 (2023)." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 69, + 339, + 542, + 357 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 339, + 542, + 357 + ], + "spans": [ + { + "bbox": [ + 69, + 339, + 542, + 357 + ], + "type": "text", + "content": "11. Fontenot, R., Kathad, U., McDermott, J., Sturtevant, D., Sharma, P. & Carr, P. Predicting a Compounds Blood-Brain-Barrier Permeability with Lantern Pharma's AI and ML Platform, RADR 2023." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 69, + 358, + 424, + 369 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 358, + 424, + 369 + ], + "spans": [ + { + "bbox": [ + 69, + 358, + 424, + 369 + ], + "type": "text", + "content": "12. Bera, S., Dent, J., Gill, G., Stolman, A. & Wu, B. SimGCN for TDC Benchmarks (2022)." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 69, + 372, + 542, + 391 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 372, + 542, + 391 + ], + "spans": [ + { + "bbox": [ + 69, + 372, + 542, + 391 + ], + "type": "text", + "content": "13. Plonka, W., Stork, C., Šićho, M. & Kirchmair, J. CYPlebrity: Machine learning models for the prediction of inhibitors of cytochrome P450 enzymes. Bioorganic & medicinal chemistry 46, 116388 (2021)." 
+ } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 69, + 392, + 542, + 411 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 392, + 542, + 411 + ], + "spans": [ + { + "bbox": [ + 69, + 392, + 542, + 411 + ], + "type": "text", + "content": "14. Hu, W., Liu, B., Gomes, J., Zitnik, M., Liang, P., Pande, V. & Leskovec, J. Strategies for pre-training graph neural networks. arXiv preprint arXiv:1905.12265 (2019)." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 69, + 412, + 542, + 430 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 412, + 542, + 430 + ], + "spans": [ + { + "bbox": [ + 69, + 412, + 542, + 430 + ], + "type": "text", + "content": "15. Huang, K., Fu, T., Glass, L. M., Zitnik, M., Xiao, C. & Sun, J. DeepPurpose: a deep learning library for drug-target interaction prediction. Bioinformatics 36, 5545-5547 (2020)." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 69, + 432, + 542, + 450 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 432, + 542, + 450 + ], + "spans": [ + { + "bbox": [ + 69, + 432, + 542, + 450 + ], + "type": "text", + "content": "16. Lagunin, A., Filimonov, D., Zakharov, A., Xie, W., Huang, Y., Zhu, F., Shen, T., Yao, J. & Poroikov, V. Computer-aided prediction of rodent carcinogenicity by PASS and CISOC-PSCT. QSAR & Combinatorial Science 28, 806-810 (2009)." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 69, + 452, + 542, + 471 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 452, + 542, + 471 + ], + "spans": [ + { + "bbox": [ + 69, + 452, + 542, + 471 + ], + "type": "text", + "content": "17. Li, P., Li, Y., Hsieh, C.-Y., Zhang, S., Liu, X., Liu, H., Song, S. & Yao, X. TrimNet: learning molecular representation from triplet messages for biomedicine. Briefings in Bioinformatics 22, bbaa266 (2021)." 
+ } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 69, + 472, + 542, + 491 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 472, + 542, + 491 + ], + "spans": [ + { + "bbox": [ + 69, + 472, + 542, + 491 + ], + "type": "text", + "content": "18. Huang, D., Chowdhuri, S. R., Li, A., Li, A., Agrawal, A., Gano, K. & Zhu, A. A Unified System for Molecular Property Predictions: Oloren ChemEngine and its Applications (2022)." + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 69, + 492, + 542, + 502 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 492, + 542, + 502 + ], + "spans": [ + { + "bbox": [ + 69, + 492, + 542, + 502 + ], + "type": "text", + "content": "19. Li, J., Cai, D. & He, X. Learning graph-level representation for drug discovery. arXiv preprint arXiv:1709.03741 (2017)." + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 69, + 504, + 542, + 522 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 504, + 542, + 522 + ], + "spans": [ + { + "bbox": [ + 69, + 504, + 542, + 522 + ], + "type": "text", + "content": "20. Raimondi, D., Simm, J., Arany, A. & Moreau, Y. A novel method for data fusion over entity-relation graphs and its application to protein-protein interaction prediction. Bioinformatics 37, 2275-2281 (2021)." + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 69, + 524, + 542, + 550 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 524, + 542, + 550 + ], + "spans": [ + { + "bbox": [ + 69, + 524, + 542, + 550 + ], + "type": "text", + "content": "21. Gfeller, D., Schmidt, J., Croce, G., Guillaume, P., Bobisse, S., Genolet, R., Queiroz, L., Cesbron, J., Racle, J. & Harari, A. Improved predictions of antigen presentation and TCR recognition with MixMHCpred2. 2 and PRIME2. 0 reveal potent SARS-CoV-2 CD8+ T-cell epitopes. Cell Systems 14, 72-83 (2023)." 
+ } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 69, + 552, + 542, + 578 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 552, + 542, + 578 + ], + "spans": [ + { + "bbox": [ + 69, + 552, + 542, + 578 + ], + "type": "text", + "content": "22. Motmaen, A., Dauparas, J., Baek, M., Abedi, M. H., Baker, D. & Bradley, P. Peptide-binding specificity prediction using fine-tuned protein structure prediction networks. Proceedings of the National Academy of Sciences 120, e2216697120 (2023)." + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 69, + 581, + 542, + 599 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 581, + 542, + 599 + ], + "spans": [ + { + "bbox": [ + 69, + 581, + 542, + 599 + ], + "type": "text", + "content": "23. Siramshetty, V., Williams, J., Nguyen, D., Neyra, J., Southall, N., Mathé, E., Xu, X. & Shah, P. Validating ADME QSAR models using marketed drugs. SLAS DISCOVERY: Advancing the Science of Drug Discovery 26, 1326-1336 (2021)." + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 69, + 601, + 542, + 619 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 601, + 542, + 619 + ], + "spans": [ + { + "bbox": [ + 69, + 601, + 542, + 619 + ], + "type": "text", + "content": "24. Haneczok, J. & Delijewski, M. Machine learning enabled identification of potential SARS-CoV-2 3CLpro inhibitors based on fixed molecular fingerprints and Graph-CNN neural representations. Journal of Biomedical Informatics 119, 103821 (2021)." + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 69, + 621, + 542, + 639 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 621, + 542, + 639 + ], + "spans": [ + { + "bbox": [ + 69, + 621, + 542, + 639 + ], + "type": "text", + "content": "25. Liu, Y., Wu, Y., Shen, X. & Xie, L. COVID-19 multi-targeted drug repurposing using few-shot learning. Frontiers in Bioinformatics 1, 693177 (2021)." 
+ } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 69, + 641, + 542, + 659 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 641, + 542, + 659 + ], + "spans": [ + { + "bbox": [ + 69, + 641, + 542, + 659 + ], + "type": "text", + "content": "26. Chen, X., Dougherty, T., Hong, C., Schibler, R., Zhao, Y. C., Sadeghi, R., Matasci, N., Wu, Y.-C. & Kerman, I. Predicting antibody developability from sequence using machine learning. *biorxiv*, 2020-06 (2020)." + } + ] + } + ], + "index": 26 + }, + { + "bbox": [ + 69, + 661, + 542, + 688 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 661, + 542, + 688 + ], + "spans": [ + { + "bbox": [ + 69, + 661, + 542, + 688 + ], + "type": "text", + "content": "27. Alves, V. M., Muratov, E., Fourches, D., Strickland, J., Kleinstreuer, N., Andrade, C. H. & Tropsha, A. Predicting chemically-induced skin reactions. Part I: QSAR models of skin sensitization and their application to identify potentially hazardous compounds. Toxicology and applied pharmacology 284, 262-272 (2015)." + } + ] + } + ], + "index": 27 + }, + { + "bbox": [ + 69, + 689, + 542, + 708 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 689, + 542, + 708 + ], + "spans": [ + { + "bbox": [ + 69, + 689, + 542, + 708 + ], + "type": "text", + "content": "28. Shermukhamedov, S., Mamurjonova, D. & Probst, M. Structure to Property: Chemical Element Embeddings and a Deep Learning Approach for Accurate Prediction of Chemical Properties. arXiv preprint arXiv:2309.09355 (2023)." 
+ } + ] + } + ], + "index": 28 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 528, + 742, + 541, + 751 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 528, + 742, + 541, + 751 + ], + "spans": [ + { + "bbox": [ + 528, + 742, + 541, + 751 + ], + "type": "text", + "content": "57" + } + ] + } + ], + "index": 30 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 56 + }, + { + "para_blocks": [ + { + "bbox": [ + 67, + 72, + 543, + 538 + ], + "type": "list", + "angle": 0, + "index": 22, + "blocks": [ + { + "bbox": [ + 67, + 72, + 543, + 91 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 72, + 543, + 91 + ], + "spans": [ + { + "bbox": [ + 67, + 72, + 543, + 91 + ], + "type": "text", + "content": "29. Vu, O., Mendenhall, J., Altarawy, D. & Meiler, J. BCL.: Mol2D—a robust atom environment descriptor for QSAR modeling and lead optimization. Journal of computer-aided molecular design 33, 477–486 (2019)." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 67, + 92, + 543, + 112 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 92, + 543, + 112 + ], + "spans": [ + { + "bbox": [ + 67, + 92, + 543, + 112 + ], + "type": "text", + "content": "30. Karim, A., Lee, M., Balle, T. & Sattar, A. CardioTox net: a robust predictor for hERG channel blockade based on deep learning meta-feature ensembles. Journal of Cheminformatics 13, 1-13 (2021)." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 67, + 113, + 542, + 131 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 113, + 542, + 131 + ], + "spans": [ + { + "bbox": [ + 67, + 113, + 542, + 131 + ], + "type": "text", + "content": "31. Korotcov, A., Tkachenko, V., Russo, D. P. & Ekins, S. Comparison of deep learning with multiple machine learning methods and metrics using diverse drug discovery data sets. Molecular pharmaceutics 14, 4462-4475 (2017)." 
+ } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 67, + 133, + 542, + 152 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 133, + 542, + 152 + ], + "spans": [ + { + "bbox": [ + 67, + 133, + 542, + 152 + ], + "type": "text", + "content": "32. Wong, L., You, Z.-H., Guo, Z.-H., Yi, H.-C., Chen, Z.-H. & Cao, M.-Y. MIPDH: a novel computational model for predicting microRNA-mRNA interactions by DeepWalk on a heterogeneous network. ACS omega 5, 17022-17032 (2020)." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 67, + 152, + 542, + 171 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 152, + 542, + 171 + ], + "spans": [ + { + "bbox": [ + 67, + 152, + 542, + 171 + ], + "type": "text", + "content": "33. Fu, T., Huang, K., Xiao, C., Glass, L. M. & Sun, J. Hint: Hierarchical interaction network for clinical-trial-outcome predictions. *Patterns* 3 (2022)." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 67, + 172, + 542, + 191 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 172, + 542, + 191 + ], + "spans": [ + { + "bbox": [ + 67, + 172, + 542, + 191 + ], + "type": "text", + "content": "34. Weber, A., Born, J. & Rodriguez Martínez, M. TITAN: T-cell receptor specificity prediction with bimodal attention networks. Bioinformatics 37, i237-i244 (2021)." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 67, + 192, + 542, + 220 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 192, + 542, + 220 + ], + "spans": [ + { + "bbox": [ + 67, + 192, + 542, + 220 + ], + "type": "text", + "content": "35. Lam, H. T., Sbodio, M. L., Galindo, M. M., Zayats, M., Fernandez-Diaz, R., Valls, V., Picco, G., Ramis, C. B. & Lopez, V. Otter-Knowledge: benchmarks of multimodal knowledge graph representation learning from different sources for drug discovery. arXiv preprint arXiv:2306.12802 (2023)." 
+ } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 67, + 221, + 542, + 248 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 221, + 542, + 248 + ], + "spans": [ + { + "bbox": [ + 67, + 221, + 542, + 248 + ], + "type": "text", + "content": "36. Kinnings, S. L., Liu, N., Tonge, P. J., Jackson, R. M., Xie, L. & Bourne, P. E. A machine learning-based method to improve docking scoring functions and its application to drug repurposing. Journal of chemical information and modeling 51, 408-419 (2011)." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 67, + 249, + 542, + 269 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 249, + 542, + 269 + ], + "spans": [ + { + "bbox": [ + 67, + 249, + 542, + 269 + ], + "type": "text", + "content": "37. Kalemati, M., Zamani Emani, M. & Koohi, S. BiComp-DTA: Drug-target binding affinity prediction through complementary biological-related and compression-based featurization approach. PLOS Computational Biology 19, e1011036 (2023)." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 67, + 270, + 533, + 281 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 270, + 533, + 281 + ], + "spans": [ + { + "bbox": [ + 67, + 270, + 533, + 281 + ], + "type": "text", + "content": "38. Wei, B. & Gong, X. DeepPLA: a novel deep learning-based model for protein-ligand binding affinity prediction (2021)." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 67, + 281, + 542, + 300 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 281, + 542, + 300 + ], + "spans": [ + { + "bbox": [ + 67, + 281, + 542, + 300 + ], + "type": "text", + "content": "39. Probst, D., Schwaller, P. & Reymond, J.-L. Reaction classification and yield prediction using the differential reaction fingerprint DRFP. Digital discovery 1, 91-97 (2022)." 
+ } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 67, + 301, + 542, + 320 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 301, + 542, + 320 + ], + "spans": [ + { + "bbox": [ + 67, + 301, + 542, + 320 + ], + "type": "text", + "content": "40. Rivera, Z. A., Tayo, L., Chen, B.-Y. & Tsai, P.-W. In silico Evaluation of the Feasibility of Magnolia officinalis Electronshutting Compounds as Parkinson's Disease Remedy. Letters in Drug Design & Discovery 21, 3039-3048 (2024)." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 67, + 321, + 542, + 340 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 321, + 542, + 340 + ], + "spans": [ + { + "bbox": [ + 67, + 321, + 542, + 340 + ], + "type": "text", + "content": "41. Pei, Q., Wu, L., Zhu, J., Xia, Y., Xie, S., Qin, T., Liu, H., Liu, T.-Y. & Yan, R. Breaking the barriers of data scarcity in drug-target affinity prediction. Briefings in Bioinformatics 24, bbad386 (2023)." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 67, + 341, + 542, + 369 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 341, + 542, + 369 + ], + "spans": [ + { + "bbox": [ + 67, + 341, + 542, + 369 + ], + "type": "text", + "content": "42. Xia, F., Shukla, M., Brettin, T., Garcia-Cardona, C., Cohn, J., Allen, J. E., Maslov, S., Holbeck, S. L., Doroshow, J. H., Evrard, Y. A., et al. Predicting tumor cell line response to drug pairs with deep learning. BMC bioinformatics 19, 71-79 (2018)." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 67, + 370, + 542, + 389 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 370, + 542, + 389 + ], + "spans": [ + { + "bbox": [ + 67, + 370, + 542, + 389 + ], + "type": "text", + "content": "43. Lind, A. P. & Anderson, P. C. Predicting drug activity against cancer cells by random forest models based on minimal genomic information and chemical properties. *PloS one* 14, e0219774 (2019)." 
+ } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 67, + 390, + 297, + 400 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 390, + 297, + 400 + ], + "spans": [ + { + "bbox": [ + 67, + 390, + 297, + 400 + ], + "type": "text", + "content": "44. Euclidia. https://github.com/euclidia/public-models. 2023." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 67, + 401, + 542, + 428 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 401, + 542, + 428 + ], + "spans": [ + { + "bbox": [ + 67, + 401, + 542, + 428 + ], + "type": "text", + "content": "45. Leenay, R. T., Aghazadeh, A., Hiatt, J., Tse, D., Roth, T. L., Apathy, R., Shifrut, E., Hultquist, J. F., Krogan, N., Wu, Z., et al. Large dataset enables prediction of repair after CRISPR-Cas9 editing in primary T cells. Nature biotechnology 37, 1034-1037 (2019)." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 67, + 430, + 542, + 457 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 430, + 542, + 457 + ], + "spans": [ + { + "bbox": [ + 67, + 430, + 542, + 457 + ], + "type": "text", + "content": "46. Yang, K., Swanson, K., Jin, W., Coley, C., Eiden, P., Gao, H., Guzman-Perez, A., Hopper, T., Kelley, B., Mathea, M., et al. Analyzing learned molecular representations for property prediction. Journal of chemical information and modeling 59, 3370-3388 (2019)." + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 67, + 459, + 542, + 478 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 459, + 542, + 478 + ], + "spans": [ + { + "bbox": [ + 67, + 459, + 542, + 478 + ], + "type": "text", + "content": "47. Preuer, K., Lewis, R. P., Hochreiter, S., Bender, A., Bulusu, K. C. & Klambauer, G. DeepSynergy: predicting anti-cancer drug synergy with Deep Learning. Bioinformatics 34, 1538-1546 (2018)." 
+ } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 67, + 479, + 542, + 498 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 479, + 542, + 498 + ], + "spans": [ + { + "bbox": [ + 67, + 479, + 542, + 498 + ], + "type": "text", + "content": "48. Zheng, S., Rao, J., Zhang, Z., Xu, J. & Yang, Y. Predicting retrosynthetic reactions using self-corrected transformer neural networks. Journal of chemical information and modeling 60, 47-55 (2019)." + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 67, + 498, + 542, + 517 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 498, + 542, + 517 + ], + "spans": [ + { + "bbox": [ + 67, + 498, + 542, + 517 + ], + "type": "text", + "content": "49. Boral, N., Ghosh, P., Goswami, A. & Bhattacharyya, M. Accountable prediction of drug ADMET Properties with molecular descriptors. bioRxiv, 2022-06 (2022)." + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 67, + 518, + 542, + 538 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 518, + 542, + 538 + ], + "spans": [ + { + "bbox": [ + 67, + 518, + 542, + 538 + ], + "type": "text", + "content": "50. Hendrycks, D., Burns, C., Basart, S., Zou, A., Mazeika, M., Song, D. & Steinhardt, J. Measuring massive multitask language understanding. arXiv preprint arXiv:2009.03300 (2020)." + } + ] + } + ], + "index": 21 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 528, + 742, + 541, + 751 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 528, + 742, + 541, + 751 + ], + "spans": [ + { + "bbox": [ + 528, + 742, + 541, + 751 + ], + "type": "text", + "content": "58" + } + ] + } + ], + "index": 23 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 57 + } + ], + "_backend": "vlm", + "_version_name": "2.6.4" +} \ No newline at end of file