syn
- title_31K_G/test_title_long_2404.19159v1.json +53 -0
- title_31K_G/test_title_long_2404.19168v1.json +0 -0
- title_31K_G/test_title_long_2404.19178v1.json +0 -0
- title_31K_G/test_title_long_2404.19205v1.json +0 -0
- title_31K_G/test_title_long_2404.19211v1.json +0 -0
- title_31K_G/test_title_long_2404.19227v3.json +0 -0
- title_31K_G/test_title_long_2404.19232v2.json +0 -0
- title_31K_G/test_title_long_2404.19232v3.json +0 -0
- title_31K_G/test_title_long_2404.19245v1.json +0 -0
- title_31K_G/test_title_long_2404.19277v1.json +51 -0
- title_31K_G/test_title_long_2404.19292v1.json +0 -0
- title_31K_G/test_title_long_2404.19346v1.json +0 -0
- title_31K_G/test_title_long_2404.19382v1.json +0 -0
- title_31K_G/test_title_long_2404.19394v1.json +0 -0
- title_31K_G/test_title_long_2404.19409v1.json +0 -0
- title_31K_G/test_title_long_2404.19420v1.json +0 -0
- title_31K_G/test_title_long_2404.19429v1.json +0 -0
- title_31K_G/test_title_long_2404.19438v2.json +0 -0
- title_31K_G/test_title_long_2404.19453v1.json +0 -0
- title_31K_G/test_title_long_2404.19479v1.json +0 -0
- title_31K_G/test_title_long_2404.19482v1.json +0 -0
- title_31K_G/test_title_long_2404.19486v1.json +0 -0
- title_31K_G/test_title_long_2404.19508v1.json +0 -0
- title_31K_G/test_title_long_2404.19509v1.json +0 -0
- title_31K_G/test_title_long_2404.19531v1.json +0 -0
- title_31K_G/test_title_long_2404.19533v1.json +0 -0
- title_31K_G/test_title_long_2404.19543v1.json +0 -0
- title_31K_G/test_title_long_2404.19553v1.json +0 -0
- title_31K_G/test_title_long_2404.19563v1.json +0 -0
- title_31K_G/test_title_long_2404.19597v1.json +0 -0
- title_31K_G/test_title_long_2404.19639v1.json +50 -0
- title_31K_G/test_title_long_2404.19696v1.json +0 -0
- title_31K_G/test_title_long_2404.19708v1.json +0 -0
- title_31K_G/test_title_long_2404.19715v1.json +0 -0
- title_31K_G/test_title_long_2404.19739v1.json +53 -0
- title_31K_G/test_title_long_2404.19740v1.json +0 -0
- title_31K_G/test_title_long_2404.19752v1.json +0 -0
- title_31K_G/test_title_long_2404.19759v1.json +0 -0
- title_31K_G/test_title_long_2405.00057v1.json +0 -0
- title_31K_G/test_title_long_2405.00077v1.json +0 -0
- title_31K_G/test_title_long_2405.00099v1.json +0 -0
- title_31K_G/test_title_long_2405.00175v1.json +0 -0
- title_31K_G/test_title_long_2405.00181v1.json +0 -0
- title_31K_G/test_title_long_2405.00198v1.json +0 -0
- title_31K_G/test_title_long_2405.00201v1.json +0 -0
- title_31K_G/test_title_long_2405.00204v1.json +0 -0
- title_31K_G/test_title_long_2405.00216v1.json +0 -0
- title_31K_G/test_title_long_2405.00218v1.json +0 -0
- title_31K_G/test_title_long_2405.00242v1.json +0 -0
- title_31K_G/test_title_long_2405.00243v1.json +0 -0
title_31K_G/test_title_long_2404.19159v1.json
ADDED
@@ -0,0 +1,53 @@
{
"url": "http://arxiv.org/abs/2404.19159v1",
"title": "What Drives Performance in Multilingual Language Models?",
"abstract": "This study investigates the factors influencing the performance of\nmultilingual large language models (MLLMs) across diverse languages. We study 6\nMLLMs, including masked language models, autoregressive models, and\ninstruction-tuned LLMs, on the SIB-200 dataset, a topic classification dataset\nencompassing 204 languages. Our analysis considers three scenarios: ALL\nlanguages, SEEN languages (present in the model's pretraining data), and UNSEEN\nlanguages (not present or documented in the model's pretraining data in any\nmeaningful way). We examine the impact of factors such as pretraining data\nsize, general resource availability, language family, and script type on model\nperformance. Decision tree analysis reveals that pretraining data size is the\nmost influential factor for SEEN languages. However, interestingly, script type\nand language family are crucial for UNSEEN languages, highlighting the\nimportance of cross-lingual transfer learning. Notably, model size and\narchitecture do not significantly alter the most important features identified.\nOur findings provide valuable insights into the strengths and limitations of\ncurrent MLLMs and hope to guide the development of more effective and equitable\nmultilingual NLP systems.",
"authors": "Sina Bagheri Nezhad, Ameeta Agrawal",
"published": "2024-04-29",
"updated": "2024-04-29",
"primary_cat": "cs.CL",
"cats": [
"cs.CL",
"I.2.7"
],
"label": "Original Paper",
"paper_cat": "LLM Fairness",
"gt": "What Drives Performance in Multilingual Language Models?",
"main_content": "Introduction Multilingual large language models (MLLMs) have revolutionized natural language processing by enabling applications like machine translation and sentiment analysis across numerous languages (Barbieri et al., 2022; Yang et al., 2023). Understanding how these models perform across languages with diverse linguistic properties is crucial for further development (Devlin et al., 2019; Wu and Dredze, 2020; Scao et al., 2022; Lai et al., 2023; Ahuja et al., 2023). Despite significant 1https://github.com/PortNLP/MLLMs_performance progress, linguistic disparities persist in NLP, highlighting the need for models that perform effectively and safely across a wider range of languages (Joshi et al., 2020; Ranathunga and de Silva, 2022; Agrawal et al., 2023; Wang et al., 2023). The factors contributing to the effectiveness of MLLMs, however, remain unclear. While several studies suggest the amount of language-specific pretraining data as a key factor (Wu and Dredze, 2020; Scao et al., 2022; Shliazhko et al., 2022; Ahuja et al., 2023), most investigations are limited in scope, focusing on a small set of languages, specific tasks, or training paradigms like masked language modeling (MLM) or autoregressive models. Crucially, prior work often overlooks the distinction between languages encountered during pretraining (SEEN), languages entirely new to the model (UNSEEN), and the complete set of languages available in the evaluation dataset (ALL). The question remains \u2013 what factors are important in the case of unseen languages where languagespecific pretraining data is not one of the relevant factors? This distinction is essential for understanding how MLLMs generalize to languages with varying levels of familiarity. Our work takes a deeper look at the various factors under several experimental settings. Our key contributions are as follows: \u2022 We conduct a comprehensive evaluation of 6 MLLMs, including MLM, autoregressive, and instruction-tuned LLMs, on a text classification task spanning a wide range of languages. This diverse set of models includes mBERT (Devlin et al., 2019), XLM-R (Conneau et al., 2020), GPT-3.5 (Brown et al., 2020), Bloom (Scao et al., 2022) in 5 sizes, Bloomz (Muennighoff et al., 2023) in 5 sizes, and XGLM (Lin et al., 2022) in 4 sizes. Additionally, we consider three training scenarios: zero-shot, 2-shot, and fully supervised. \u2022 We consider four key factors in our analysis: prearXiv:2404.19159v1 [cs.CL] 29 Apr 2024 \fReference Factors Task Languages Wu and Dredze (2020) Pretraining data size, Task-specific data size, Vocabulary size NER 99 Scao et al. (2022) Pretraining data size, Task-specific data size, Language family, Language script Probing 17 Shliazhko et al. (2022) Pretraining data size, Language script, Model size Perplexity 61 Ahuja et al. (2023) Pretraining data size, Tokenizer fertility Classification, QA, Sequence Labeling, NLG 2-48 Ours Pretraining data size, Language family, Language script, General resource availability Text classification 204 Table 1: Factors considered in related works and this work. training data size, general resource availability levels, language family, and script type. This allows for a more nuanced understanding of the factors influencing MLLM performance. \u2022 We leverage the recently introduced SIB-200 dataset (Adelani et al., 2023), which includes 204 languages, enabling us to investigate MLLM performance across a diverse and extensive linguistic landscape. 
Between the languages pertaining to the models and the dataset, we are able to further distinguish them along the dimensions of SEEN, UNSEEN, or ALL, depending on whether the languages were seen during pretraining, or unseen during pretraining, or the set of all languages available in the evaluation dataset, respectively. By analyzing these factors across different models and training setups, we aim to provide deeper insights into the development of effective and equitable MLLMs for a truly multilingual NLP landscape. 2 Related Work Multilingual NLP research has flourished in recent years, with the development and evaluation of numerous multilingual language models trained on diverse and extensive language datasets. Notable examples include mBERT (Devlin et al., 2019), XLM-R (Conneau et al., 2020), mBART (Liu et al., 2020), mT5 (Xue et al., 2021), BLOOM (Scao et al., 2022), GPT-3 (Brown et al., 2020), GPT-4 (OpenAI, 2023), LLaMA (Touvron et al., 2023), PaLM (Chowdhery et al., 2022), and PaLM 2 (Anil et al., 2023). Researchers are increasingly interested in investigating the factors influencing MLLM performance. Wu and Dredze (2020) examined the impact of pretraining data size, task-specific data size, and vocabulary size on named entity recognition performance. Scao et al. (2022) explored the correlation between probing performance and factors like language family, task-specific dataset size, and pretraining dataset size for the BLOOM model. Shliazhko et al. (2022) assessed the impact of language script, pretraining corpus size, and model size on language modeling performance, while Ahuja et al. (2023) investigated the influence of tokenizer fertility and pretraining data on MLLM performance. While these studies provide valuable insights, they often focus on a limited set of languages, primarily due to the historical scarcity of annotated multilingual datasets. Additionally, research by Blasi et al. (2022) highlights the significant inequalities in the development and performance of language technologies across the world\u2019s languages, with a strong bias towards resource-rich languages like English and other Western European languages. Further exacerbating this issue is the lack of representation for dialects, varieties, and closely-related languages within existing datasets. As noted by Faisal et al. (2024), this absence hinders the development of NLP systems capable of effectively handling the nuances of linguistic diversity. However, the recent emergence of comprehensive multilingual datasets like SIB-200 (Adelani et al., 2023), and GLOT500 (ImaniGooghari et al., 2023) offers exciting opportunities for more extensive and nuanced analyses. Table 1 summarizes the factors considered in related works and our study. For a more comprehensive overview of contributing factors to cross-lingual transfer in multilingual language models, readers are encouraged to refer to the review by Philippy et al. (2023). \f3 Methodology Several factors can influence the performance of multilingual models. In this section, we briefly describe the distinct factors related to typology and data, the dataset of more than 200 languages used for evaluation, and the models we consider in this study. 3.1 Typology and Data Factors We consider various factors to understand their impact on model performance including: \u2022 Pretraining Data Size: This refers to the percentage of language-specific data used during the pretraining of each model2. 
\u2022 General Resource Availability (Res Level): Beyond model-specific resources such as pretraining data size, we also consider a more general notion of resource availability, as per the linguistic diversity taxonomy which categorizes languages into six resource levels (Joshi et al., 2020), where level 0 corresponds to low-resource and level 5 corresponds to high-resource level languages. This classification helps us understand the influence of more general resource availability on model performance, and may serve as a proxy when model-specific statistics may not be available (such as in the case of proprietary models). Language resource levels generally correlate positively with models' pretraining data sizes, with varying degrees of alignment across different models: mBERT (0.52) and XLM-R (0.48) exhibit relatively stronger correlations, while GPT-3 (0.18), BLOOM (0.37), and XGLM (0.31) show comparatively weaker associations. \u2022 Language Family (Lang Family): The language families that the languages belong to capture some of their linguistic relationships. The information was sourced from the Ethnologue3 (Ethnologue, 2022). \u2022 Script: The script of a language refers to the writing system it employs. This information was sourced from ScriptSource4. 2We obtained the train dataset distribution values for mBERT from https://github.com/mayhewsw/multilingual-data-stats and for GPT-3.5 we use proxy statistics from https://github.com/openai/gpt-3/blob/master/dataset_statistics/languages_by_word_count.csv. Distributions of the train datasets for XLM-R, BLOOM, BLOOMZ and XGLM were obtained from their respective papers. 3https://www.ethnologue.com 4https://www.scriptsource.org 3.2 Data We systematically study the multilingual models under an important NLP task \u2013 text classification (Chang and Bergen, 2023). The SIB-200 dataset (Adelani et al., 2023) offers a valuable resource for evaluating MLLM performance in a large-scale text classification task, enabling simultaneous analysis of approximately 200 languages, with text samples categorized into one of seven classes. F1 score is used as the metric for this task. Exploratory analysis of the dataset reveals several interesting insights: \u2022 As shown in Figure 1, most languages in SIB-200 are classified as resource level 1, indicating a deliberate focus on low-resource languages. This allows us to assess how MLLMs perform on languages with limited linguistic resources available. \u2022 Figure 4 in Appendix B illustrates the distribution of language families within the SIB-200 dataset. Notably, the dataset encompasses 23 different language families, providing a rich linguistic landscape for our analysis. Indo-European languages constitute a significant portion (approximately 36%) of SIB-200, reflecting their status as the most widely spoken language family globally (Ethnologue, 2022). However, Niger-Congo, Afro-Asiatic, and Austronesian languages also have considerable representation in the dataset. This diverse language family distribution enables us to analyze MLLM performance across different linguistic groups. \u2022 The SIB-200 dataset encompasses text samples written in 29 different script types, offering a diverse range of writing systems for our analysis. As shown in Figure 5 in Appendix B, the Latin script, used by nearly 70% of the global population (Vaughan, 2020), is the most prevalent writing system in the dataset, followed by Arabic and Cyrillic scripts. 
This distribution allows us to investigate the impact of script type on MLLM performance. For all evaluations, we use the default train and test splits recommended by the SIB-200 authors. This ensures consistency and comparability across different models and training settings. \fFigure 1: Distribution of resource levels in SIB-200. 3.3 Models We study the following 6 multilingual language models spanning various architectures and sizes: \u2022 Masked Language Models (MLMs): \u2013 mBERT (bert-base-multilingual-cased) (Devlin et al., 2019) \u2013 XLM-R (xlm-roberta-base) (Conneau et al., 2020) \u2022 Autoregressive Language Models: \u2013 GPT-3.5 (text-davinci-003) (Brown et al., 2020) \u2013 Bloom (Scao et al., 2022) in 5 sizes (560m, 1.1b, 1.7b, 3b, and 7.1b parameters) \u2013 XGLM (Lin et al., 2022) in 4 sizes (564m, 1.7b, 2.9b, and 7.5b parameters) \u2022 Instruction-tuned LLMs: \u2013 Bloomz (Muennighoff et al., 2023) in 5 sizes (560m, 1.1b, 1.7b, 3b, and 7.1b parameters) These models were chosen for several key reasons: 1. These models provide broad language coverage, allowing us to analyze performance across a diverse set of languages and maximize the linguistic diversity in our study. 2. By including MLMs, autoregressive models, and instruction-tuned LLMs, we can investigate how different model architectures influence performance. 3. The inclusion of models with varying parameter sizes allows us to investigate the interplay between model scale and the factors influencing performance. 4. mBERT and XLM-R, despite being relatively smaller models, have demonstrated competitive performance even compared to larger models like ChatGPT after fine-tuning (Lai et al., 2023; Zhu et al., 2023). 5. The inclusion of Bloom and XGLM, both autoregressive models, allows us to investigate the impact of pretraining data composition. Bloom focuses more on low-resource languages during pretraining, whereas XGLM emphasizes high-resource languages. This deliberate selection enables us to analyze how the distribution of languages in the pretraining data affects performance across different resource levels. Note that we primarily focus on models that are open-source or have made the list of pretraining languages and data composition available. Additionally, we consider the following training and inference scenarios: \u2022 Zero-shot: GPT-3.5, Bloom, Bloomz, and XGLM were evaluated directly on the test set without any specific fine-tuning. This assesses the model\u2019s ability to generalize to unseen tasks and languages based on its pretrained knowledge. \u2022 Two-shot In-Context Learning (ICL): Bloom, Bloomz, and XGLM were also evaluated in a two-shot ICL setting where the models were provided with two labeled examples for each class from the train set. This allows us to particularly investigate effective factors for improving performance on unseen languages. We opted for two demonstrations in ICL to keep the input length shorter than the context length of our models across all languages. \u2022 Full-shot: mBERT and XLM-R were fine-tuned on the SIB-200 training set and evaluated on the test set. For full-shot training of mBERT and XLM-R, we adhered to the hyperparameters recommended by the SIB-200 paper authors to ensure consistency with the original dataset benchmarks. For Bloom, Bloomz, and XGLM in both zero-shot and two-shot ICL settings, as well as for GPT-3.5 in the zero-shot setting, we use prompts to frame the text classification task, which are detailed in Appendix A. 
4 Results and Analysis Now we discuss the results of our comprehensive experiments. We focus on analyzing the performance of models across three distinct scenarios: ALL, SEEN, and UNSEEN. The ALL scenario considers all languages in the SIB-200 dataset for which resource level information is available (this information is available for 190 languages). The SEEN scenario focuses on languages included in the pretraining data of the respective MLLMs, while the UNSEEN scenario examines performance on languages not present in the pretraining data. In total, results are obtained from 93 distinct experimental settings (models of different sizes, training scenarios, and language categories of seen/unseen/all). \fFigure 2: Decision tree for Bloom-560m (zero-shot, SEEN languages). \u201cGeneral resource level\u201d emerges as the most important feature, with a significant performance difference between languages above and below the 2.5 threshold (p < 0.001 as per Mann-Whitney U test). To understand the complex interplay of multiple factors influencing MLLM performance, we employ decision tree analysis for statistical inference. This approach is well-suited for handling factors of different types, including categorical, ordinal, and numeric data. Decision trees are trained to predict the F1 score of models based on language features. By analyzing the resulting tree structure, we can gain insights into the relative importance of different features and their interactions. As decision trees were trained on the entirety of our data, traditional methods for testing their performance were not applicable. Instead, we employed the Mann-Whitney U test (Mann and Whitney, 1947) to ensure that the features appearing at the root of the decision trees were indeed relevant and contributed significantly to the differentiation between the language splits. This approach allowed us to validate the significance of the features identified by the decision tree in delineating distinct language groups without relying solely on the performance metrics of the decision tree models themselves. Figure 2 presents the decision tree analysis for the Bloom-560m model on SEEN languages, revealing general resource level as the most influential feature. Specifically, the tree distinguishes between languages with resource levels below 2.5 (levels 0, 1, 2) and those above 2.5 (levels 3, 4, 5). Among the 44 SEEN languages, the 29 languages with resource levels below 2.5 exhibit a mean F1 score of 0.174, while the 15 languages with higher resource levels achieve a significantly higher mean F1 score of 0.379. A Mann-Whitney U test confirms a statistically significant difference in performance between these two groups (p < 0.001). This suggests that for the Bloom-560m model on SEEN languages, the general resource level of a language plays a crucial role in determining its performance, with higher resource levels leading to better performance. By employing this combined approach of decision tree analysis and statistical testing, we can effectively disentangle the complex relationships between various factors and their impact on MLLM performance. The summarized results of all 93 decision tree analyses are presented in Table 2. 
We observe distinct patterns in feature importance across the three scenarios (detailed decision trees for all models and setups are available in our repository: https://github.com/PortNLP/MLLMs_performance). \fTable 2 (Top features identified by decision tree analysis for each model and scenario; columns: Model | ALL | SEEN | UNSEEN). Zero-shot: Bloom-560m | Pretrain data (<=0.125%) | Resource level (<=2.5) | Script (Latin or not); Bloom-1b1 | Pretrain data (<=0.125%) | Resource level (<=2.5) | Script (Devanagari or not); Bloom-1b7 | Pretrain data (<=0.175%) | Resource level (<=2.5) | Script (Latin or not); Bloom-3b | Pretrain data (<=0.175%) | Resource level (<=2.5) | Script (Latin or not); Bloom-7b1 | Pretrain data (<=0.125%) | Resource level (<=2.5) | Script (Devanagari or not); Bloomz-560m | Script (Latin or not) | Pretrain data (<=0.03%) | Script (Latin or not); Bloomz-1b1 | Pretrain data (<=0.008%) | Pretrain data (<=0.03%) | Script (Latin or not); Bloomz-1b7 | Pretrain data (<=0.008%) | Pretrain data (<=0.03%) | Script (Latin or not); Bloomz-3b | Pretrain data (<=0.002%) | Pretrain data (<=0.013%) | Script (Latin or not); Bloomz-7b1 | Pretrain data (<=0%) | Pretrain data (<=0.9%) | Script (Latin or not); XGLM-564m | Pretrain data (<=0.003%) | Resource level (<=2) | Lang. family (Austronesian or not); XGLM-1.7b | Pretrain data (<=0.006%) | Pretrain data (<=1.487%) | Script (Devanagari or not); XGLM-2.9b | Pretrain data (<=0.003%) | Script (Latin or not) | Script (Devanagari or not); XGLM-7.5b | Pretrain data (<=0%) | Pretrain data (<=1.122%) | Script (Devanagari or not); GPT-3.5 | Resource level (<=2.5) | Pretrain data (<=5.312%) | Lang. family (Indo-European or not). Two-shot ICL: Bloom-560m | Pretrain data (<=0.045%) | Pretrain data (<=0.045%) | Lang. family (Indo-European or not); Bloom-1b1 | Pretrain data (<=0.095%) | Pretrain data (<=0.095%) | Script (Latin or not); Bloom-1b7 | Pretrain data (<=0.175%) | Pretrain data (<=0.175%) | Script (Latin or not); Bloom-3b | Pretrain data (<=0.008%) | Pretrain data (<=0.008%) | Script (Latin or not); Bloom-7b1 | Pretrain data (<=0.008%) | Pretrain data (<=0.008%) | Script (Latin or not); Bloomz-560m | Pretrain data (<=0.03%) | Pretrain data (<0.03%) | Script (Devanagari or not); Bloomz-1b1 | Pretrain data (<=0.008%) | Pretrain data (<=0.013%) | Script (Latin or not); Bloomz-1b7 | Pretrain data (<=0.005%) | Pretrain data (<=0.013%) | Script (Cyrillic or not); Bloomz-3b | Pretrain data (<=0%) | Pretrain data (<=0.9%) | Script (Latin or not); Bloomz-7b1 | Pretrain data (<=0%) | Pretrain data (<=0.013%) | Script (Latin or not); XGLM-564m | Pretrain data (<=0.003%) | Pretrain data (<=0.095%) | Lang. family (Niger-Congo or not); XGLM-1.7b | Pretrain data (<=0.003%) | Resource level (<=2) | Script (Devanagari or not); XGLM-2.9b | Pretrain data (<=0.003%) | Script (Latin or not) | Lang. family (Indo-European or not); XGLM-7.5b | Pretrain data (<=0.003%) | Pretrain data (<=0.15%) | Lang. family (Indo-European or not). Full-shot: mBERT | Pretrain data (<=3.786%) | Pretrain data (<=8.627%) | Lang. family (Indo-European or not); XLM-R | Pretrain data (<=13.5%) | Pretrain data (<=90%) | Lang. family (Indo-European or not). For SEEN languages, pretraining data size and resource level dominate (except for XGLM-2.9b, where script type is most influential). For UNSEEN languages, linguistic characteristics (script type and language family) take precedence. All features exhibit statistically significant differences in performance (p < 0.001). ALL Languages: \u2022 For the ALL languages scenario, decision trees clearly reveal that pretraining data is the most influential factor in 29 out of 31 cases. 
Because ALL includes languages SEEN and UNSEEN, notably, our deeper look at the decision tree analyses indicates that this factor in most cases boils down to whether the language was part of the training set or not, rather than the amount of language-specific data, as indicated by the values of the pretraining data percentages, which range from 0% to at most 0.175%. \fFigure 3: F1 Score vs. model-specific pretraining data (percentage) for GPT-3.5, mBERT and XLM-R models. The GPT-3.5 model draws the distinction along general resource levels: whether a language is low-resource (0, 1, or 2) or level 3 and higher. SEEN Languages: \u2022 For SEEN languages, model-specific pretraining data continues to remain the most influential factor in 22 out of 31 model and scenario combinations. However, this time because there are no unseen languages in the mix, the model performance seems to be impacted by the amount of pretraining data, as indicated by the slightly higher percentage values as compared to the ALL languages scenario. \u2022 Interestingly, general resource availability based on the linguistic diversity taxonomy (Joshi et al., 2020) appears to be the most important factor for Bloom models in the zero-shot setup, as well as for xglm-564m (zero-shot) and xglm-1.7b (two-shot). For Bloom models, the distinction is along resource levels 0/1/2 or higher, whereas for xglm models, it is along 0/1 and higher. Additionally, xglm-2.9b in both zero-shot and two-shot scenarios shows a stronger influence of script type (Latin or not). These cases indicate that factors beyond pretraining data size can also play a significant role for specific models and settings. \u2022 Furthermore, Figure 3 plots the performance of mBERT, XLM-R, and GPT-3.5 models in relation to model-specific pretraining data amounts. The figure demonstrates a clear trend: as the model-specific language data increases, so does the model\u2019s performance. This observation aligns with the finding that pretraining data size is a crucial factor for SEEN languages. UNSEEN Languages: \u2022 In contrast to SEEN languages, UNSEEN languages show quite a different pattern. Naturally, because UNSEEN languages do not have pretraining data as one of their relevant factors, it is absent from this column. However, out of 31 model and scenario combinations, 23 are most impacted by script type, and 8 are most influenced by language family. This shift in importance towards linguistic features suggests that when models encounter unfamiliar languages, they rely more heavily on similarities in writing systems to generalize from their existing knowledge. \u2022 Within the scripts and language families, there are nuanced differences. For instance, while generally the models make the distinction along the lines of whether the script is Latin or not, occasionally the Devanagari script also seems important, particularly for XGLM models. Similarly, while Indo-European is the most common influential language family, we also observe an instance each of Austronesian and Niger-Congo. Additionally, models of different sizes from the same family may prefer not just a different script or a different language family when moving from the zero-shot to the two-shot setting; they may prefer an entirely different factor (e.g., Bloom-560m in zero-shot vs. two-shot settings), further complicating matters. 5 Discussion Our comprehensive analysis of 6 multilingual models on the SIB-200 dataset reveals valuable insights into the factors influencing their performance across a diverse range of languages. 
Our key findings can be summarized as follows: \u2022 Pretraining data size consistently emerges as a crucial factor, but the distinction is less along \fthe quantity of data but rather whether the languages have been encountered during training or not. \u2022 For UNSEEN languages, script type and language family are influential, suggesting that MLLMs rely on cross-lingual transfer learning to generalize to unfamiliar languages. \u2022 General resource availability plays a less prominent role overall but appears to be important for one specific model under one setting (Bloom in zero-shot for seen languages). \u2022 Interestingly, the performance of Bloomz, an instruction-tuned model, is more influenced by the distribution of languages in its pretraining corpus than the fine-tuned dataset used for instruction tuning. This suggests that the initial pretraining stage plays a crucial role in shaping the model\u2019s capabilities, even after further fine-tuning for specific tasks. \u2022 Finally, our analysis also indicates that while model size and architecture may influence overall performance, they do not significantly alter the most important features identified by the decision trees. The distribution of languages in the pretraining data and the linguistic characteristics of the target languages consistently emerge as the dominant factors regardless of the specific model architecture or scale. Several future directions remain to be explored. We observed that script type can be more influential for specific models and settings. Further investigation is needed to understand the reasons behind these preferences and how they can be leveraged to achieve more consistent performance across languages. It is also not clear why models lean towards different factors under different settings (for instance, resource level is important in Bloom-560m zero-shot setting but pretraining data is important in its two-shot ICL setting). 6 Conclusion This study analyzed 6 multilingual language models on the SIB-200 dataset, revealing key insights into their performance across around 200 languages. We found that the size of the pretraining data significantly affects performance. For unseen languages, script type and language family become more crucial, highlighting the importance of crosslingual transfer learning. While general resource availability plays a less prominent role overall, it can be significant for specific models and settings. Interestingly, model size and architecture do not significantly change the most important features identified in our analysis. Our work contributes to a deeper understanding of MLLMs and hopes to guide the development of more effective and equitable multilingual NLP systems. Limitations This study provides insights into multilingual language model performance, but it is important to acknowledge certain limitations. The SIB-200 dataset, while extensive, may contain biases in language representation and genre distribution, potentially affecting the generalizability of our findings. Additionally, our analysis focuses on the text classification task, and the findings may not directly generalize to other NLP tasks. While we analyzed a diverse set of models, our findings may not be fully representative of the entire MLLM landscape. Finally, our analysis is based on the current state of MLLMs, and the relative importance of different factors may change as these models continue to evolve. 
Future research should address these limitations by expanding to more diverse datasets, investigating different NLP tasks, evaluating a broader range of models, and conducting longitudinal studies. Ethics Statement The experimental setup and code implementation ensured adherence to ethical guidelines, data usage agreements, and compliance with the terms of service of the respective language models and data sources. The research team also recognized the importance of inclusivity and fairness by considering a diverse set of languages and language families in the evaluation, thus avoiding biases and promoting balanced representation. Acknowledgements We are grateful to the anonymous reviewers whose feedback and thought-provoking questions enhanced this paper. The engaging discussions and collaborative spirit within the PortNLP research group were instrumental in shaping this research. We acknowledge the National Science Foundation for their financial support through grants (CRII:RI \f2246174 and SAI-P 2228783), which made this work possible.",
"additional_graph_info": {
"graph": [],
"node_feat": {
"Sina Bagheri Nezhad": [
{
"url": "http://arxiv.org/abs/2404.19159v1",
"title": "What Drives Performance in Multilingual Language Models?",
"abstract": "This study investigates the factors influencing the performance of\nmultilingual large language models (MLLMs) across diverse languages. We study 6\nMLLMs, including masked language models, autoregressive models, and\ninstruction-tuned LLMs, on the SIB-200 dataset, a topic classification dataset\nencompassing 204 languages. Our analysis considers three scenarios: ALL\nlanguages, SEEN languages (present in the model's pretraining data), and UNSEEN\nlanguages (not present or documented in the model's pretraining data in any\nmeaningful way). We examine the impact of factors such as pretraining data\nsize, general resource availability, language family, and script type on model\nperformance. Decision tree analysis reveals that pretraining data size is the\nmost influential factor for SEEN languages. However, interestingly, script type\nand language family are crucial for UNSEEN languages, highlighting the\nimportance of cross-lingual transfer learning. Notably, model size and\narchitecture do not significantly alter the most important features identified.\nOur findings provide valuable insights into the strengths and limitations of\ncurrent MLLMs and hope to guide the development of more effective and equitable\nmultilingual NLP systems.",
"authors": "Sina Bagheri Nezhad, Ameeta Agrawal",
"published": "2024-04-29",
"updated": "2024-04-29",
"primary_cat": "cs.CL",
"cats": [
"cs.CL",
"I.2.7"
],
|
| 33 |
+
"main_content": "Introduction Multilingual large language models (MLLMs) have revolutionized natural language processing by enabling applications like machine translation and sentiment analysis across numerous languages (Barbieri et al., 2022; Yang et al., 2023). Understanding how these models perform across languages with diverse linguistic properties is crucial for further development (Devlin et al., 2019; Wu and Dredze, 2020; Scao et al., 2022; Lai et al., 2023; Ahuja et al., 2023). Despite significant 1https://github.com/PortNLP/MLLMs_performance progress, linguistic disparities persist in NLP, highlighting the need for models that perform effectively and safely across a wider range of languages (Joshi et al., 2020; Ranathunga and de Silva, 2022; Agrawal et al., 2023; Wang et al., 2023). The factors contributing to the effectiveness of MLLMs, however, remain unclear. While several studies suggest the amount of language-specific pretraining data as a key factor (Wu and Dredze, 2020; Scao et al., 2022; Shliazhko et al., 2022; Ahuja et al., 2023), most investigations are limited in scope, focusing on a small set of languages, specific tasks, or training paradigms like masked language modeling (MLM) or autoregressive models. Crucially, prior work often overlooks the distinction between languages encountered during pretraining (SEEN), languages entirely new to the model (UNSEEN), and the complete set of languages available in the evaluation dataset (ALL). The question remains \u2013 what factors are important in the case of unseen languages where languagespecific pretraining data is not one of the relevant factors? This distinction is essential for understanding how MLLMs generalize to languages with varying levels of familiarity. Our work takes a deeper look at the various factors under several experimental settings. Our key contributions are as follows: \u2022 We conduct a comprehensive evaluation of 6 MLLMs, including MLM, autoregressive, and instruction-tuned LLMs, on a text classification task spanning a wide range of languages. This diverse set of models includes mBERT (Devlin et al., 2019), XLM-R (Conneau et al., 2020), GPT-3.5 (Brown et al., 2020), Bloom (Scao et al., 2022) in 5 sizes, Bloomz (Muennighoff et al., 2023) in 5 sizes, and XGLM (Lin et al., 2022) in 4 sizes. Additionally, we consider three training scenarios: zero-shot, 2-shot, and fully supervised. \u2022 We consider four key factors in our analysis: prearXiv:2404.19159v1 [cs.CL] 29 Apr 2024 \fReference Factors Task Languages Wu and Dredze (2020) Pretraining data size, Task-specific data size, Vocabulary size NER 99 Scao et al. (2022) Pretraining data size, Task-specific data size, Language family, Language script Probing 17 Shliazhko et al. (2022) Pretraining data size, Language script, Model size Perplexity 61 Ahuja et al. (2023) Pretraining data size, Tokenizer fertility Classification, QA, Sequence Labeling, NLG 2-48 Ours Pretraining data size, Language family, Language script, General resource availability Text classification 204 Table 1: Factors considered in related works and this work. training data size, general resource availability levels, language family, and script type. This allows for a more nuanced understanding of the factors influencing MLLM performance. \u2022 We leverage the recently introduced SIB-200 dataset (Adelani et al., 2023), which includes 204 languages, enabling us to investigate MLLM performance across a diverse and extensive linguistic landscape. 
Between the languages pertaining to the models and the dataset, we are able to further distinguish them along the dimensions of SEEN, UNSEEN, or ALL, depending on whether the languages were seen during pretraining, or unseen during pretraining, or the set of all languages available in the evaluation dataset, respectively. By analyzing these factors across different models and training setups, we aim to provide deeper insights into the development of effective and equitable MLLMs for a truly multilingual NLP landscape. 2 Related Work Multilingual NLP research has flourished in recent years, with the development and evaluation of numerous multilingual language models trained on diverse and extensive language datasets. Notable examples include mBERT (Devlin et al., 2019), XLM-R (Conneau et al., 2020), mBART (Liu et al., 2020), mT5 (Xue et al., 2021), BLOOM (Scao et al., 2022), GPT-3 (Brown et al., 2020), GPT-4 (OpenAI, 2023), LLaMA (Touvron et al., 2023), PaLM (Chowdhery et al., 2022), and PaLM 2 (Anil et al., 2023). Researchers are increasingly interested in investigating the factors influencing MLLM performance. Wu and Dredze (2020) examined the impact of pretraining data size, task-specific data size, and vocabulary size on named entity recognition performance. Scao et al. (2022) explored the correlation between probing performance and factors like language family, task-specific dataset size, and pretraining dataset size for the BLOOM model. Shliazhko et al. (2022) assessed the impact of language script, pretraining corpus size, and model size on language modeling performance, while Ahuja et al. (2023) investigated the influence of tokenizer fertility and pretraining data on MLLM performance. While these studies provide valuable insights, they often focus on a limited set of languages, primarily due to the historical scarcity of annotated multilingual datasets. Additionally, research by Blasi et al. (2022) highlights the significant inequalities in the development and performance of language technologies across the world\u2019s languages, with a strong bias towards resource-rich languages like English and other Western European languages. Further exacerbating this issue is the lack of representation for dialects, varieties, and closely-related languages within existing datasets. As noted by Faisal et al. (2024), this absence hinders the development of NLP systems capable of effectively handling the nuances of linguistic diversity. However, the recent emergence of comprehensive multilingual datasets like SIB-200 (Adelani et al., 2023), and GLOT500 (ImaniGooghari et al., 2023) offers exciting opportunities for more extensive and nuanced analyses. Table 1 summarizes the factors considered in related works and our study. For a more comprehensive overview of contributing factors to cross-lingual transfer in multilingual language models, readers are encouraged to refer to the review by Philippy et al. (2023). \f3 Methodology Several factors can influence the performance of multilingual models. In this section, we briefly describe the distinct factors related to typology and data, the dataset of more than 200 languages used for evaluation, and the models we consider in this study. 3.1 Typology and Data Factors We consider various factors to understand their impact on model performance including: \u2022 Pretraining Data Size: This refers to the percentage of language-specific data used during the pretraining of each model2. 
\u2022 General Resource Availability (Res Level): Beyond model-specific resources such as pretraining data size, we also consider a more general notion of resource availability, as per the linguistic diversity taxonomy which categorizes languages into six resource levels (Joshi et al., 2020), where level 0 corresponds to low-resource and level 5 corresponds to high-resource level languages. This classification helps us understand the influence of more general resource availability on model performance, and may serve as a proxy when model-specific statistics may not be available (such as in the case of proprietary models). Language resource levels generally correlate positively with models pretraining data sizes, with varying degrees of alignment across different models: mBERT (0.52) and XLM-R (0.48) exhibit relatively stronger correlations, while GPT3 (0.18), BLOOM (0.37), and XGLM (0.31) show comparatively weaker associations. \u2022 Language Family (Lang Family): The language families that the languages belong to capture some of their linguistic relationships. The information was sourced from the Ethnologue3 (Ethnologue, 2022). \u2022 Script: The script of a language refers to the writing system it employs. This information was sourced from ScriptSource4. 2We obtained the train dataset distribution values for mBERT from https://github.com/mayhewsw/ multilingual-data-stats and for GPT-3.5 we use proxy statistics from https://github.com/openai/gpt-3/blob/ master/dataset_statistics/languages_by_word_ count.csv. Distribution of train dataset for XLM-R, BLOOM, BLOOMZ and XGLM were obtained from their respective papers. 3https://www.ethnologue.com 4https://www.scriptsource.org 3.2 Data We systematically study the multilingual models under an important NLP task \u2013 text classification (Chang and Bergen, 2023). The SIB-200 dataset (Adelani et al., 2023) offers a valuable resource for evaluating MLLM performance in a large-scale text classification task, enabling simultaneous analysis of approximately 200 languages, with text samples categorized into one of seven classes. F1 score is used as the metric for this task. Exploratory analysis of the dataset reveals several interesting insights: \u2022 As shown in Figure 1, most languages in SIB200 are classified as resource level 1, indicating a deliberate focus on low-resource languages. This allows us to assess how MLLMs perform on languages with limited linguistic resources available. \u2022 Figure 4 in Appendix B illustrates the distribution of language families within the SIB-200 dataset. Notably, the dataset encompasses 23 different language families, providing a rich linguistic landscape for our analysis. IndoEuropean languages constitute a significant portion (approximately 36%) of SIB-200, reflecting their status as the most widely spoken language family globally (Ethnologue, 2022). However, Niger-Congo, Afro-Asiatic, and Austronesian languages also have considerable representation in the dataset. This diverse language family distribution enables us to analyze MLLM performance across different linguistic groups. \u2022 The SIB-200 dataset encompasses text samples written in 29 different script types, offering a diverse range of writing systems for our analysis. As shown in Figure 5 in Appendix B, the Latin script, used by nearly 70% of the global population (Vaughan, 2020), is the most prevalent writing system in the dataset, followed by Arabic and Cyrillic scripts. 
This distribution allows us to investigate the impact of script type on MLLM performance. For all evaluations, we use the default train and test splits recommended by the SIB-200 authors. This ensures consistency and comparability across different models and training settings. \fFigure 1: Distribution of resource levels in SIB-200. 3.3 Models We study the following 6 multilingual language models spanning various architectures and sizes: \u2022 Masked Language Models (MLMs): \u2013 mBERT (bert-base-multilingual-cased) (Devlin et al., 2019) \u2013 XLM-R (xlm-roberta-base) (Conneau et al., 2020) \u2022 Autoregressive Language Models \u2013 GPT-3.5 (text-davinci-003) (Brown et al., 2020) \u2013 Bloom (Scao et al., 2022) in 5 sizes (560m, 1.1b, 1.7b, 3b, and 7.1b parameters) \u2013 XGLM (Lin et al., 2022) in 4 sizes (564m, 1.7b, 2.9b, and 7.5b parameters) \u2022 Instruction-tuned LLMs: \u2013 Bloomz (Muennighoff et al., 2023) in 5 sizes (560m, 1.1b, 1.7b, 3b, and 7.1b parameters) These models were chosen for several key reasons: 1. These models provide broad language coverage, allowing us to analyze performance across a diverse set of languages and maximize the linguistic diversity in our study. 2. By including MLMs, autoregressive models, and instruction-tuned LLMs, we can investigate how different model architectures influence performance. 3. The inclusion of models with varying parameter sizes allows us to investigate the interplay between model scale and the factors influencing performance. 4. mBERT and XLM-R, despite being relatively smaller models, have demonstrated competitive performance even compared to larger models like ChatGPT after fine-tuning (Lai et al., 2023; Zhu et al., 2023). 5. The inclusion of both Bloom and XGLM, both autoregressive models, allows us to investigate the impact of pretraining data composition. Bloom focuses more on low-resource languages during pretraining, whereas XGLM emphasizes high-resource languages. This deliberate selection enables us to analyze how the distribution of languages in the pretraining data affects performance across different resource levels. Note that we primarily focus on models that are open-source or have made the list of pretraining languages and data composition available. Additionally, we consider the following training and inference scenarios: \u2022 Zero-shot: GPT-3.5, Bloom, Bloomz, and XGLM were evaluated directly on the test set without any specific fine-tuning. This assesses the model\u2019s ability to generalize to unseen tasks and languages based on its pretrained knowledge. \u2022 Two-shot In-Context Learning (ICL): Bloom, Bloomz, and XGLM were also evaluated in twoshot ICL setting where the models were provided with two labeled examples for each class from the train set. This allows us to particularly investigate effective factors for improving performance of unseen languages. We opted for two demonstrations in ICL to keep the input length shorter than the context length of our models across all languages. \u2022 Full-shot: mBERT and XLM-R were fine-tuned on the SIB-200 training set and evaluated on the test set. For full-shot training of mBERT and XLM-R, we adhered to the hyperparameters recommended by the SIB-200 paper authors to ensure consistency with the original dataset benchmarks. For Bloom, Bloomz, and XGLM in both zero-shot and two-shot ICL settings, as well as for GPT-3.5 in zero-shot setting, we use prompts to frame the text classification task, which are detailed in Appendix A. 
4 Results and Analysis Now we discuss the results of our comprehensive experiments. We focus on analyzing the performance of models across three distinct scenarios: ALL, SEEN, and UNSEEN. The ALL \fFigure 2: Decision tree for Bloom-560m (zero-shot, SEEN languages). \u201cGeneral resource level\u201c emerges as the most important feature, with a significant performance difference between languages above and below the 2.5 threshold (p < 0.001 as per Mann-Whitney U test). scenario considers all languages in the SIB-200 dataset for which resource level information is available5. The SEEN scenario focuses on languages included in the pretraining data of the respective MLLMs, while the UNSEEN scenario examines performance on languages not present in the pretraining data. In total, results are obtained from 93 distinct experimental settings (models of different sizes, training scenarios, and language categories of seen/unseen/all). To understand the complex interplay of multiple factors influencing MLLM performance, we employ decision tree analysis for statistical inference. This approach is well-suited for handling factors of different types, including categorical, ordinal, and numeric data. Decision trees are trained to predict the F1 score of models based on language features. By analyzing the resulting tree structure, we can gain insights into the relative importance of different features and their interactions. As decision trees were trained on the entirety of our data, traditional methods for testing their performance were not applicable. Instead, we employed the Mann-Whitney U test (Mann and Whitney, 1947), to ensure that the features appearing at the root of the decision trees were indeed relevant and contributed significantly to the differentiation between the language splits. This approach allowed us to validate the significance of the features identified by the decision tree in delineating 5This information is available for 190 languages. distinct language groups without relying solely on the performance metrics of the decision tree models themselves. Figure 2 presents the decision tree analysis for the Bloom-560m model on SEEN languages, revealing general resource level as the most influential feature. Specifically, the tree distinguishes between languages with resource levels below 2.5 (levels 0,1,2) and those above 2.5 (levels 3,4,5). Among the 44 SEEN languages, the 29 languages with resource levels below 2.5 exhibit a mean F1 score of 0.174, while the 15 languages with higher resource levels achieve a significantly higher mean F1 score of 0.379. A Mann-Whitney U test confirms a statistically significant difference in performance between these two groups (p < 0.001). This suggests that for the Bloom-560m model on SEEN languages, the general resource level of a language plays a crucial role in determining its performance, with higher resource levels leading to better performance. By employing this combined approach of decision tree analysis and statistical testing, we can effectively disentangle the complex relationships between various factors and their impact on MLLM performance. The summarized results6 of all 93 decision tree analyses are presented in Table 2. 
We observe distinct patterns in feature importance across the three scenarios: 6Detailed decision trees for all models and setups are available in our repository: https://github.com/PortNLP/ MLLMs_performance \fZero-shot Model ALL SEEN UNSEEN Bloom-560m Pretrain data (<=0.125%) Resource level (<=2.5) Script (Latin or not) Bloom-1b1 Pretrain data (<=0.125%) Resource level (<=2.5) Script (Devanagari or not) Bloom-1b7 Pretrain data (<=0.175%) Resource level (<=2.5) Script (Latin or not) Bloom-3b Pretrain data (<=0.175%) Resource level (<=2.5) Script (Latin or not) Bloom-7b1 Pretrain data (<=0.125%) Resource level (<=2.5) Script (Devanagari or not) Bloomz-560m Script (Latin or not) Pretrain data (<=0.03%) Script (Latin or not) Bloomz-1b1 Pretrain data (<=0.008%) Pretrain data (<=0.03%) Script (Latin or not) Bloomz-1b7 Pretrain data (<=0.008%) Pretrain data (<=0.03%) Script (Latin or not) Bloomz-3b Pretrain data (<=0.002%) Pretrain data (<=0.013%) Script (Latin or not) Bloomz-7b1 Pretrain data (<=0%) Pretrain data (<=0.9%) Script (Latin or not) XGLM-564m Pretrain data (<=0.003%) Resource level (<=2) Lang. family (Austronesian or not) XGLM-1.7b Pretrain data (<=0.006%) Pretrain data (<=1.487%) Script (Devanagari or not) XGLM-2.9b Pretrain data (<=0.003%) Script (Latin or not) Script (Devanagari or not) XGLM-7.5b Pretrain data (<=0%) Pretrain data (<=1.122%) Script (Devanagari or not) GPT-3.5 Resource level (<= 2.5) Pretrain data (<=5.312%) Lang. family (Indo-European or not) Two-shot ICL Model ALL SEEN UNSEEN Bloom-560m Pretrain data (<=0.045%) Pretrain data (<=0.045%) Lang. family (Indo-European or not) Bloom-1b1 Pretrain data (<=0.095%) Pretrain data (<=0.095%) Script (Latin or not) Bloom-1b7 Pretrain data (<=0.175%) Pretrain data (<=0.175%) Script (Latin or not) Bloom-3b Pretrain data (<=0.008%) Pretrain data (<=0.008%) Script (Latin or not) Bloom-7b1 Pretrain data (<=0.008%) Pretrain data (<=0.008%) Script (Latin or not) Bloomz-560m Pretrain data (<=0.03%) Pretrain data (<0.03%) Script (Devanagari or not) Bloomz-1b1 Pretrain data (<=0.008%) Pretrain data (<=0.013%) Script (Latin or not) Bloomz-1b7 Pretrain data (<=0.005%) Pretrain data (<=0.013%) Script (Cyrillic or not) Bloomz-3b Pretrain data (<=0%) Pretrain data (<=0.9%) Script (Latin or not) Bloomz-7b1 Pretrain data (<=0%) Pretrain data (<=0.013%) Script (Latin or not) XGLM-564m Pretrain data (<=0.003%) Pretrain data (<=0.095%) Lang. family (Niger-Congo or not) XGLM-1.7b Pretrain data (<=0.003%) Resource level (<=2) Script (Devanagari or not) XGLM-2.9b Pretrain data (<=0.003%) Script (Latin or not) Lang. family (Indo-European or not) XGLM-7.5b Pretrain data (<=0.003%) Pretrain data (<=0.15%) Lang. family (Indo-European or not) Full-shot Model ALL SEEN UNSEEN mBERT Pretrain data (<=3.786%) Pretrain data (<=8.627%) Lang. family (Indo-European or not) XLM-R Pretrain data (<=13.5%) Pretrain data (<=90%) Lang. family (Indo-European or not) Table 2: Top features identified by decision tree analysis for each model and scenario. For SEEN languages, pretraining data size and resource level dominate (except for XGLM-2.9b, where script type is most influential). For UNSEEN languages, linguistic characteristics (script type and language family) take precedence. All features exhibit statistically significant differences in performance (p < 0.001). ALL Languages: \u2022 For the ALL languages scenario, decision trees clearly reveal that pretraining data is the most influential factor in 29 out of 31 cases. 
Because ALL includes languages SEEN and UNSEEN, notably, our deeper look at the decision tree analyses indicates that this factor in most cases boils down to whether the language was part of the training set or not, rather than the amount of language-specific data, as indicated by the values \fFigure 3: F1 Score vs. model-specific pretraining data (percentage) for GPT-3.5, mBERT and XLM-R models. of the pretraining data percentages which range from 0% to at most 0.175%. GPT-3.5 model draws the distinction along general resource levels whether a language is low resource (0, 1, or 2) or level 3 and higher. SEEN Languages: \u2022 For SEEN languages, model-specific pretraining data continues to remain the most influential factor in 22 out of 31 model and scenario combinations. However, this time because there are no unseen languages in the mix, the model performance seems to be impacted by the amount of pretraining data, as indicated by the slightly higher percentage values as compared to the ALL languages scenario. \u2022 Interestingly, general resource availability based on linguistic diversity taxonomy (Joshi et al., 2020) appears to be the most important factor for Bloom models in the zero-shot setup, as well as for xglm-564m (zero-shot) and xglm-1.7b (twoshot). For Bloom models, the distinction is along resource levels 0/1/2 or higher, whereas for xglm models, it is along 0/1 and higher. Additionally, xglm-2.9b in both zero-shot and two-shot scenarios shows a stronger influence of script type (Latin or not). These cases indicate that factors beyond pretraining data size can also play a significant role for specific models and settings. \u2022 Furthermore, Figure 3 plots the performance of mBERT, XLM-R, and GPT-3.5 models in relation to model-specific pretraining data amounts. The figure demonstrates a clear trend: as the modelspecific language data increases, so does the model\u2019s performance. This observation aligns with the finding that pretraining data size is a crucial factor for SEEN languages. UNSEEN Languages: \u2022 In contrast to SEEN languages, UNSEEN languages show quite a different pattern. Naturally, because UNSEEN languages do not have pretraining data as one of their relevant factors, it is absent from this column. However, out of 31 models, 23 are most impacted by script type, and 8 are most influenced by language family. This shift in importance towards linguistic features suggests that when models encounter unfamiliar languages, they rely more heavily on similarities in writing systems to generalize from their existing knowledge. \u2022 Within the scripts and language families, there are nuanced differences. For instance, while generally the models make the distinction along the lines of whether the script is Latin or not, occassionally Devanagari script also seems important, particularly for XGLM models. Similarly, while Indo-European is the most common influential language family, we also observe an instance each of Austronesian and Niger-Congo. Additionally, models of different sizes from the same family may prefer not just a different script or a different language family when moving from zero-shot to two-shot setting, they may prefer an entirely different factor (e.g., Bloom-560m in zero-shot vs. two-shot settings), further complicating the matters. 5 Discussion Our comprehensive analysis of 6 multilingual models on the SIB-200 dataset reveals valuable insights into the factors influencing their performance across a diverse range of languages. 
Our key findings can be summarized as follows:
• Pretraining data size consistently emerges as a crucial factor, but the distinction is less about the quantity of data than about whether the languages were encountered during training at all.
• For UNSEEN languages, script type and language family are influential, suggesting that MLLMs rely on cross-lingual transfer learning to generalize to unfamiliar languages.
• General resource availability plays a less prominent role overall but appears to be important for one specific model under one setting (Bloom in the zero-shot setup for SEEN languages).
• Interestingly, the performance of Bloomz, an instruction-tuned model, is more influenced by the distribution of languages in its pretraining corpus than by the dataset used for instruction tuning. This suggests that the initial pretraining stage plays a crucial role in shaping the model's capabilities, even after further fine-tuning for specific tasks.
• Finally, our analysis also indicates that while model size and architecture may influence overall performance, they do not significantly alter the most important features identified by the decision trees. The distribution of languages in the pretraining data and the linguistic characteristics of the target languages consistently emerge as the dominant factors, regardless of the specific model architecture or scale.

Several future directions remain to be explored. We observed that script type can be more influential for specific models and settings. Further investigation is needed to understand the reasons behind these preferences and how they can be leveraged to achieve more consistent performance across languages. It is also unclear why models lean towards different factors under different settings (for instance, resource level is important in the Bloom-560m zero-shot setting, but pretraining data is important in its two-shot ICL setting).

6 Conclusion
This study analyzed 6 multilingual language models on the SIB-200 dataset, revealing key insights into their performance across around 200 languages. We found that the size of the pretraining data significantly affects performance. For unseen languages, script type and language family become more crucial, highlighting the importance of cross-lingual transfer learning. While general resource availability plays a less prominent role overall, it can be significant for specific models and settings. Interestingly, model size and architecture do not significantly change the most important features identified in our analysis. Our work contributes to a deeper understanding of MLLMs and hopes to guide the development of more effective and equitable multilingual NLP systems.

Limitations
This study provides insights into multilingual language model performance, but it is important to acknowledge certain limitations. The SIB-200 dataset, while extensive, may contain biases in language representation and genre distribution, potentially affecting the generalizability of our findings. Additionally, our analysis focuses on the text classification task, and the findings may not directly generalize to other NLP tasks. While we analyzed a diverse set of models, our findings may not be fully representative of the entire MLLM landscape. Finally, our analysis is based on the current state of MLLMs, and the relative importance of different factors may change as these models continue to evolve.
Future research should address these limitations by expanding to more diverse datasets, investigating different NLP tasks, evaluating a broader range of models, and conducting longitudinal studies.

Ethics Statement
The experimental setup and code implementation ensured adherence to ethical guidelines, data usage agreements, and compliance with the terms of service of the respective language models and data sources. The research team also recognized the importance of inclusivity and fairness by considering a diverse set of languages and language families in the evaluation, thus avoiding biases and promoting balanced representation.

Acknowledgements
We are grateful to the anonymous reviewers whose feedback and thought-provoking questions enhanced this paper. The engaging discussions and collaborative spirit within the PortNLP research group were instrumental in shaping this research. We acknowledge the National Science Foundation for their financial support through grants (CRII:RI 2246174 and SAI-P 2228783), which made this work possible."
},
{
"url": "http://arxiv.org/abs/2310.05404v2",
"title": "Exploring the Maze of Multilingual Modeling",
"abstract": "Multilingual language models have gained significant attention in recent\nyears, enabling the development of applications that meet diverse linguistic\ncontexts. In this paper, we present a comprehensive evaluation of three popular\nmultilingual language models: mBERT, XLM-R, and GPT-3. We assess their\nperformance across a diverse set of languages, with a focus on understanding\nthe impact of resource availability (general and model-specific), language\nfamily, script type, and word order on model performance, under two distinct\ntasks - text classification and text generation. Our findings reveal that while\nthe amount of language-specific pretraining data plays a crucial role in model\nperformance, we also identify other factors such as general resource\navailability, language family, and script type, as important features. We hope\nthat our study contributes to a deeper understanding of multilingual language\nmodels to enhance their performance across languages and linguistic contexts.",
"authors": "Sina Bagheri Nezhad, Ameeta Agrawal",
"published": "2023-10-09",
"updated": "2024-02-12",
"primary_cat": "cs.CL",
"cats": [
"cs.CL",
"I.2.7"
],
"main_content": "Introduction Multilingual language models have transformed natural language processing (NLP) by enabling applications such as machine translation and sentiment analysis in multiple languages. Continuous efforts are dedicated to understanding of multilingual models\u2019 performance across languages with distinct linguistic properties (Devlin et al., 2019; Wu and Dredze, 2020; Scao et al., 2022; Lai et al., 2023; Ahuja et al., 2023). Despite several efforts, linguistic disparity in NLP persists (Joshi et al., 2020; Ranathunga and de Silva, 2022). It remains important to to not only improve the performance of the models for most languages of the world, but also to make them safer by focusing on alignment beyond English (Wang et al., 2023). However, it remains unclear which factors truly contribute to the development of effective multilingual models. Several studies indicate the amount of language-specific data available in the pretraining corpus as one of the key factors (Wu and Dredze, 2020). However, most studies are conducted for a limited set of languages on a given task, focusing on a limited set of training paradigm (such as masked language modeling (MLM) or autoregressive), and especially on a handful of factors. In this work, we contribute to this area of research by comprehensively evaluating three multilingual language models of type MLM and autoregressive (mBERT (Devlin et al., 2019), XLM-R (Conneau et al., 2020) and GPT-3 (Brown et al., 2020)) under two types of tasks (text classification and text generation) covering a wide range of languages. More imporantly, we consider five different factors in our analysis (pretraining data size, general resource availability levels, language family, script type, and word order). We leverage the recently introduced SIB-200 dataset as well as create a novel multilingual dataset of recently published BBC news articles in 43 languages, called mBBC, which allows us to evaluate on text that may not have been seen by these models during their training. Through an extensive multivariate and univariate analysis, we find that while model-specific resource availability strongly influences model performance in certain cases, this does not appear to be true for all models and all tasks. Other factors identified as important include general resource availability, language family, and script type. We hope that our findings will help researchers and practitioners to develop more inclusive and effective multilingual NLP systems. 2 Related Work Multilingual NLP research has made significant strides, introducing the development and evaluation of several multilingual language models trained on diverse and combined language datasets (mBERT arXiv:2310.05404v2 [cs.CL] 12 Feb 2024 \fReference Factors Task Languages Wu and Dredze (2020) Pretraining data size, Task-specific data size, Vocabulary size NER 99 Scao et al. (2022) Pretraining data size, Task-specific data size, Language family, Language script Probing 17 Shliazhko et al. (2022) Pretraining data size, Language script, Model size Perplexity 61 Ahuja et al. (2023) Pretraining data size, Tokenizer fertility Classification, QA, Sequence Labeling, NLG, RAI 2-48 Ours Pretraining data size, Language family, Language script, General resource availability, Word order Text classification, Text generation 204, 43 Table 1: Factors considered in related works and this work. Factors distinct to our work are shown in bold. 
(Devlin et al., 2019), XLM-R (Conneau et al., 2020), mBART (Liu et al., 2020), mT5 (Xue et al., 2021), BLOOM (Scao et al., 2022), GPT-3 (Brown et al., 2020), GPT-4 (OpenAI, 2023), LLaMA (Touvron et al., 2023), PaLM (Chowdhery et al., 2022), PaLM 2 (Anil et al., 2023), and others). Factors that may have an impact on the performance of multilingual models are being increasingly investigated. Wu and Dredze (2020) used the named entity recognition task and considered three factors that might affect downstream task performance: pretraining data size, task-specific data size, and vocabulary size in task-specific data. They found that the larger the task-specific supervised dataset, the better the downstream performance on NER. Scao et al. (2022) studied the correlation between probing performance and several factors, and found that the results of BLOOM-1B7 are highly correlated with language family, task-specific dataset size, and pretraining dataset size. Shliazhko et al. (2022) used perplexity to assess the impact of language script, pretraining corpus size, and model size, and found that language modeling performance depends on the model size and the pretraining corpus size in a language, whereas Ahuja et al. (2023) studied the impact of tokenizer fertility and pretraining data, and found that models perform worse in languages for which the tokenizer is of poor quality, and that the amount of training data available in a language can partially explain some results. In contrast, we conduct a more holistic investigation to provide better insights related to three multilingual language models (both MLM and autoregressive) across two distinct tasks (a supervised task such as text classification, and an unsupervised text generation task). Moreover, prior work studied only a few languages for a given task, primarily because of the limited availability of annotated datasets. The recent landscape of multilingual datasets, however, has seen remarkable contributions (Costa-jussà et al., 2022; Adelani et al., 2023; ImaniGooghari et al., 2023), offering valuable resources for diverse linguistic analysis. While these resources are used in our analysis, we further create mBBC to support unsupervised modeling, encompassing news from 2023 in 43 languages. Concerns of data contamination remain persistent (Golchin and Surdeanu, 2023; Deng et al., 2023), and using mBBC ensures that the evaluation uses data that was unseen by the language models considered in our study. Moreover, it addresses the need for a dataset that can be leveraged without fine-tuning language models, mitigating the impact of hyperparameter tuning in our analytical pursuits. Table 1 presents an overview of some of the related works.

3 Exploring the Maze of Multilingual Modeling
Several factors can influence the performance of multilingual models. In this study, we consider three multilingual models, five distinct factors related to typology and data, and two types of NLP tasks.

3.1 Models
The three multilingual language models studied in our analysis include mBERT (bert-base-multilingual-cased) (Devlin et al., 2019), XLM-R (xlm-roberta-base) (Conneau et al., 2020), and GPT-3 (text-davinci-003) (Brown et al., 2020). mBERT and XLM-R are masked language models, while GPT-3 is an autoregressive language model. These models were selected because of their extensive language support, allowing us to maximize the linguistic diversity covered in our analysis.
Additionally, the choice of mBERT and XLM-R was influenced by the fact that these models, after fine-tuning, continue to demonstrate competitive performance, even rivaling larger language models such as ChatGPT (Lai et al., 2023; Zhu et al., 2023).

3.2 Typology and Data Factors
We consider various factors to understand their impact on model performance, including:
• Pretraining Data Size (Train Token (TT)): This is the amount of language-specific pretraining data (million tokens) used by each model during training. (We obtained the Train Token (TT) values for mBERT from https://github.com/mayhewsw/multilingual-data-stats, for XLM-R from its paper (Conneau et al., 2020), and for GPT-3 we use proxy statistics from https://github.com/openai/gpt-3/blob/master/dataset_statistics/languages_by_word_count.csv.)
• General Resource Availability (Res Level): Beyond model-specific resources such as pretraining data size, we also consider a more general notion of resource availability, as per the linguistic diversity taxonomy, which categorizes languages into six resource levels (Joshi et al., 2020). This classification helps us understand the influence of more general resource availability on model performance, and may serve as a proxy when model-specific statistics are not available (such as in the case of commercial models).
• Language Family (Lang Family): The language families that the languages belong to capture some of their linguistic relationships. This information was sourced from the Ethnologue (Ethnologue, 2022; https://www.ethnologue.com).
• Script: The script of a language refers to the writing system it employs. This information was sourced from ScriptSource (https://www.scriptsource.org).
• Word Order: Word order refers to the arrangement of syntactic constituents within a language. This feature captures the structural variations in how languages express relationships between subject, object, and verb (e.g., Subject-Object-Verb (SOV), Subject-Verb-Object (SVO), and Verb-Subject-Object (VSO)). This information was sourced from Dryer and Haspelmath (2013).

3.3 Tasks and Datasets
We systematically study the multilingual models under two distinct and important tasks: text classification and text generation (Chang and Bergen, 2023).

Text Classification on the SIB-200 dataset: The SIB-200 dataset (Adelani et al., 2023) facilitates the text classification task in 204 languages, where each instance of text is categorized into one of six classes. Performance is measured in terms of F1 score. The mBERT and XLM-R models were fine-tuned on the training set of SIB-200 and evaluated on a separate test set. The GPT-3 model was used in the zero-shot setting without any specific fine-tuning. The default train and test splits with hyperparameters introduced by the authors of SIB-200 were used.

Text Generation on the mBBC dataset: As autoregressive models have become increasingly popular, so has the task of text generation, where models select each next token given some context. Such a task presents a complementary way of evaluation by not requiring any labeled data. Given a sequence of n tokens, the models predict the next token n + 1. We formulate this as a binary classification task: if the ground-truth token matches any token in the top k predicted tokens generated by the models, then the output is considered correct. The results are reported in terms of accuracy. For each model, we utilize their respective tokenizers to preprocess the input sequences.
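As an illustration of the top-k next-token evaluation just described, here is a minimal sketch using the Hugging Face transformers API; the stand-in model name, toy input, and default n/k values are our assumptions, not the authors' implementation.

```python
# Minimal sketch (assumed, not the authors' code) of the top-k next-token
# evaluation described above: given a context of n tokens, a prediction is
# "correct" if the ground-truth next token appears among the model's top-k.
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

MODEL = "gpt2"  # stand-in; the paper evaluates mBERT, XLM-R, and GPT-3
tok = AutoTokenizer.from_pretrained(MODEL)
model = AutoModelForCausalLM.from_pretrained(MODEL).eval()

def topk_hit(text: str, n: int = 30, k: int = 5) -> bool:
    ids = tok(text, return_tensors="pt").input_ids[0]
    if len(ids) <= n:
        return False  # not enough context for a prediction
    context, gold = ids[:n].unsqueeze(0), ids[n].item()
    with torch.no_grad():
        logits = model(context).logits[0, -1]  # scores for token n + 1
    return gold in logits.topk(k).indices.tolist()

print(topk_hit("The quick brown fox jumps over the lazy dog. " * 5))
```

Note that for masked language models such as mBERT the prediction step would use a mask token rather than a causal head; this sketch only covers the autoregressive case.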
For each language in mBBC, we experiment with 2000 samples, which allows us to obtain statistically significant results while ensuring computational feasibility. (We experimented with various hyperparameter settings and finally empirically set n = 30 and k = 5.) The experimental procedure and implementation details are described in Appendix B.

mBBC (multilingual BBC): To create this new multilingual news dataset, news articles were gathered from BBC News (https://www.bbc.co.uk/ws/languages) in 43 different languages, which, in contrast to SIB-200, presents a relatively real-world snapshot of language distribution, based on the fact that the BBC broadcasts news in these 43 languages, providing global coverage. Most importantly, the articles are sourced from mid-2023, which allows us to be reasonably confident that the models considered in our study have not been exposed to this data during their training, thereby limiting concerns of data contamination. Additionally, by exclusively sourcing articles from a single source, consistency in tone and writing style across diverse languages is maintained, facilitating a more comparable evaluation. The dataset includes languages from 12 language families and 16 scripts. Detailed statistics of the mBBC dataset's languages, including language family, script, and other relevant linguistic characteristics, are presented in Appendix A. Among the languages available in mBBC, mBERT was able to support 32, while XLM-R supported 38, with 31 of them overlapping with mBERT's supported languages. GPT-3 was run on all 43 languages in our dataset.

[Figure 1: Distribution of resource level in SIB-200 and mBBC datasets.]
[Figure 2: Distribution of language family in SIB-200 and mBBC datasets.]
[Figure 3: Decision tree visualization for the GPT-3 model on (a) the SIB-200 dataset and (b) the mBBC dataset. Value refers to the expected F1 score/accuracy of the model.]

3.4 Analysis of SIB-200 and mBBC
Figure 1 shows that most languages present in SIB-200 are classified as resource level 1, which is intentional by design. However, mBBC, which was created from what was naturally available on the BBC website, also contains a significant number of low resource languages, with the majority falling under resource level 1. This indicates that while linguistic resources may be limited for many languages, they are still utilized by communities and services such as BBC News in the real world, emphasizing the need for considerable attention to these underserved languages. Figure 2 shows that Indo-European languages dominate both datasets (about 36% of SIB-200 and 44% of mBBC), reflecting their status as the most widely spoken language family in the world (Ethnologue, 2022). In SIB-200, the two other language families with considerable presence are Niger-Congo and Austronesian, whereas in mBBC, they are Afro-Asiatic and Niger-Congo. In terms of writing systems, the Latin script is the most common across both datasets, being used by nearly 70% of the global population (Vaughan, 2020). The next two most frequent scripts are Arabic and Cyrillic across both datasets (see Figure 11 in Appendix A).

4 Results and Analysis
In this section, we present the results of our evaluation of multilingual language models and analyze their performance based on various factors, including resources (model-specific and general), language family, script, word order, and their interactions.
4.1 Multivariate Analysis
To collectively analyze and understand the intricate interplay of multiple factors of different types (categorical, ordinal, and numeric), we use decision tree analysis for statistical inference to identify influential features. This is followed by the Mann-Whitney U test (Mann and Whitney, 1947) for the classification task and Fisher's exact test (Fisher, 1922) for the generation task to determine significant differences. Decision trees are trained to predict the accuracy and F1 score of models based on language features, and thus, analyzing them allows us to gain insights into the significance of the features. Figure 3 presents the decision tree analysis of the GPT-3 model for the SIB-200 and mBBC datasets. Other results are included in Appendix C and D. According to the analysis, for SIB-200, general resource level (more or less than 2.5) is identified as the most important feature. For lower resource languages (the left child node), language family is the next most important feature, whereas for higher resource languages (the right child node), the train token size is the next most important feature. For mBBC, script type (Latin or not) appears to be the most important feature. All results are statistically significant (p < 0.001).

Table 2: Top feature in decision trees. For the downstream (classification) task, training size and resource level are the top features, while for the text generation task linguistic characteristics are more important. The p-values for all features in this table are less than 0.001; p-values for SIB-200 are calculated with the Mann-Whitney U test, and p-values for mBBC with Fisher's exact test.
Model | SIB-200 | mBBC
mBERT | Pretraining data | Language family
XLM-R | Pretraining data | Script type
GPT-3 | Resource level | Script type

Table 2 summarizes the results of all the decision tree analyses (full results are included in Appendix C and D). In general, for text classification on SIB-200, two out of three models are most impacted by the model-specific pretraining data size. However, general resource availability based on the linguistic diversity taxonomy (Joshi et al., 2020) appears to be the most important factor for GPT-3. Interestingly, for text generation using the mBBC dataset, the decision tree analysis reveals factors other than resource availability to be most important: for GPT-3 and XLM-R, it is script type, an often overlooked factor, whereas for mBERT, it is language family.

[Figure 4: Correlation analysis between performance and pretraining data (train tokens): (a) F1 score vs. train tokens for SIB-200; (b) accuracy vs. train tokens for mBBC.]
[Figure 5: Model results across different resource levels: (a) average F1 score of mBERT, XLM-R, and GPT-3 across resource levels on SIB-200; (b) average accuracy of mBERT, XLM-R, and GPT-3 across resource levels on mBBC.]

Taken together, these results suggest that there appear to be model-based as well as task-based differences that affect what is considered the most important factor in predicting a model's performance on a given task, and that in only 2 out of 6 settings (3 models x 2 tasks) was pretraining data size indicated as the most important factor, with general resource levels, script type, and language family also emerging as important factors in other settings. In other words, the same model may be impacted by different factors depending on the task at hand (classification vs. generation).
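A minimal sketch of the two significance tests named above, using SciPy; the arrays and the 2x2 table are illustrative stand-ins, not the paper's data.

```python
# Minimal sketch (illustrative data, assumed setup): the two significance tests
# named above, as available in SciPy.
from scipy.stats import mannwhitneyu, fisher_exact

# Mann-Whitney U: compare per-language F1 scores split by a feature,
# e.g., languages below vs. above a resource-level threshold.
low_res_f1 = [0.12, 0.20, 0.18, 0.25, 0.15]
high_res_f1 = [0.61, 0.72, 0.68, 0.80, 0.75]
u_stat, p_u = mannwhitneyu(low_res_f1, high_res_f1)
print(f"Mann-Whitney U p-value: {p_u:.4g}")

# Fisher's exact test: 2x2 table of correct/incorrect next-token predictions
# for, e.g., Latin-script vs. non-Latin-script languages.
table = [[850, 150],   # Latin: hits, misses
         [600, 400]]   # non-Latin: hits, misses
odds, p_f = fisher_exact(table)
print(f"Fisher's exact p-value: {p_f:.4g}")
```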
4.2 Univariate Analysis
We dig deeper into the outputs of our analyses to examine the impact of selected factors that were identified as important. The full set of results is presented in Appendix C and D.

Impact of Pretraining Data Size (Train Token): Figure 4 shows that for text classification using SIB-200, mBERT and XLM-R clearly obtain marked improvements as the language-specific pretraining data (train tokens) available to the models increases. To a lesser extent, this observation also holds for GPT-3. For text generation using mBBC, a weaker relationship between performance and train tokens is observed for mBERT and GPT-3, while XLM-R fails to show any clear pattern.

Impact of General Resource Availability (Res Level): Figure 5 illustrates the performance of mBERT, XLM-R, and GPT-3 across varying resource levels. For the text classification task on SIB-200, the mBERT and XLM-R models perform similarly, while considerably outperforming GPT-3. In terms of trends related to resource levels, the results reinforce the significance of resource levels, with the lower resource levels (0, 1, and 2) showing weaker performance than the relatively higher resource levels (3, 4, and 5).

[Figure 6: Average accuracy of mBERT, XLM-R, and GPT-3 across language families and resource levels for text classification on SIB-200. The results within each language family are averaged for all languages of the same resource levels across all three models.]

For the text generation task on mBBC, as expected, it is GPT-3, the autoregressive model, that performs much better than the MLM models mBERT and XLM-R. However, for this task, the results are not as clearly distinct. While the highest resource level (5) continues to show a slight advantage over all the other levels, the gap is noticeably smaller. In other words, except for level-5 languages, increased resources do not necessarily guarantee improved performance. The results of languages in level 2 are often lower than those of levels 0 or 1, implying that the influence of resource availability on model performance is less pronounced in the text generation task on mBBC.

Impact of Language Family: Figures 6 and 7 present the results of the language family-based analysis on the text classification and text generation tasks, respectively. In both cases, we notice that higher resource levels generally afford higher performance across all language families. However, there is a considerable difference in performance between the same resource levels but different language families, e.g., level 5 of Afro-Asiatic as compared to level 5 of Sino-Tibetan (Figure 6), or level 3 of Austronesian as compared to level 3 of Dravidian or Indo-European (Figure 7). While some of these differences may be in part due to the different number of languages present in each group, the results of this fine-grained analysis suggest that resource levels alone may not be a sufficient indicator of performance. Moreover, no single language family emerges as the most dominant feature. These findings demonstrate the complex relationship between language families, resource availability, and model performance. While resource availability is important, other factors also influence performance within specific language families.

Impact of Script Type: Next, we analyze the impact of script types on multilingual language model performance (Figure 13 in Appendix).
One notable observation is that the GPT-3 model reveals a consistent superiority of the Latin script over other scripts in the text generation task.

[Figure 7: Average accuracy of mBERT, XLM-R, and GPT-3 across language families and resource levels for text generation on mBBC. The results within each language family are averaged for all languages of the same resource levels.]

5 Discussion
Our study evaluates the performance of the multilingual language models mBERT, XLM-R, and GPT-3. Some key observations can be summarized as follows:
• Resource availability strongly correlates with model performance in text classification tasks, but less so in text generation tasks. Instead, text generation on mBBC was influenced by factors such as language family and script type.
• The relationship between resource availability, language families, and model performance remains complex. While some language families exhibited consistent patterns across models, others showed varying results. Moreover, among the three models studied, there were notable differences, potentially due to their different training corpora.
• The impact of script type on model performance varied among the evaluated models. While mBERT and XLM-R showed no clear patterns across script types, the GPT-3 model consistently performed better with the Latin script on the text generation task.

6 Conclusion
Our extensive evaluation of multilingual language models across two tasks, covering 203 and 43 diverse languages respectively, highlighted several interesting results. While certain models and tasks were impacted by resource availability (model-specific or general), language family and script type were found to be important factors for other models on the other task. We plan to extend our research to incorporate newer large language models, as well as to explore the impact of additional factors, such as language-specific morphological features or syntactic structures, on model performance.

Limitations
Our study has several limitations that warrant acknowledgement. Firstly, the evaluation relied on two datasets, which may not fully encompass the diversity of languages and language usages. To obtain a more comprehensive understanding of multilingual language model performance, future work should incorporate additional datasets from diverse domains and genres. Another limitation is the absence of fine-grained language identification and preprocessing steps in our data collection process when creating the mBBC dataset. While this enabled direct retrieval of articles from specific news sources in each language, it may have introduced noise and inconsistencies into the dataset. Future research should consider integrating robust language identification and preprocessing techniques to enhance the quality and consistency of the dataset.

Ethics Statement
The experimental setup and code implementation ensured adherence to ethical guidelines, data usage agreements, and compliance with the terms of service of the respective language models and data sources. The research team also recognized the importance of inclusivity and fairness by considering a diverse set of languages and language families in the evaluation, thereby avoiding biases and promoting balanced representation."
}
]
},
"edge_feat": {}
}
}
title_31K_G/test_title_long_2404.19168v1.json
ADDED
The diff for this file is too large to render. See raw diff
title_31K_G/test_title_long_2404.19178v1.json
ADDED
The diff for this file is too large to render. See raw diff
title_31K_G/test_title_long_2404.19205v1.json
ADDED
The diff for this file is too large to render. See raw diff
title_31K_G/test_title_long_2404.19211v1.json
ADDED
The diff for this file is too large to render. See raw diff
title_31K_G/test_title_long_2404.19227v3.json
ADDED
The diff for this file is too large to render. See raw diff
title_31K_G/test_title_long_2404.19232v2.json
ADDED
The diff for this file is too large to render. See raw diff
title_31K_G/test_title_long_2404.19232v3.json
ADDED
The diff for this file is too large to render. See raw diff
title_31K_G/test_title_long_2404.19245v1.json
ADDED
The diff for this file is too large to render. See raw diff
title_31K_G/test_title_long_2404.19277v1.json
ADDED
@@ -0,0 +1,51 @@
{
"url": "http://arxiv.org/abs/2404.19277v1",
"title": "Bridge to Non-Barrier Communication: Gloss-Prompted Fine-grained Cued Speech Gesture Generation with Diffusion Model",
"abstract": "Cued Speech (CS) is an advanced visual phonetic encoding system that\nintegrates lip reading with hand codings, enabling people with hearing\nimpairments to communicate efficiently. CS video generation aims to produce\nspecific lip and gesture movements of CS from audio or text inputs. The main\nchallenge is that given limited CS data, we strive to simultaneously generate\nfine-grained hand and finger movements, as well as lip movements, meanwhile the\ntwo kinds of movements need to be asynchronously aligned. Existing CS\ngeneration methods are fragile and prone to poor performance due to\ntemplate-based statistical models and careful hand-crafted pre-processing to\nfit the models. Therefore, we propose a novel Gloss-prompted Diffusion-based CS\nGesture generation framework (called GlossDiff). Specifically, to integrate\nadditional linguistic rules knowledge into the model. we first introduce a\nbridging instruction called \\textbf{Gloss}, which is an automatically generated\ndescriptive text to establish a direct and more delicate semantic connection\nbetween spoken language and CS gestures. Moreover, we first suggest rhythm is\nan important paralinguistic feature for CS to improve the communication\nefficacy. Therefore, we propose a novel Audio-driven Rhythmic Module (ARM) to\nlearn rhythm that matches audio speech. Moreover, in this work, we design,\nrecord, and publish the first Chinese CS dataset with four CS cuers. Extensive\nexperiments demonstrate that our method quantitatively and qualitatively\noutperforms current state-of-the-art (SOTA) methods. We release the code and\ndata at https://glossdiff.github.io/.",
"authors": "Wentao Lei, Li Liu, Jun Wang",
"published": "2024-04-30",
"updated": "2024-04-30",
"primary_cat": "cs.CV",
"cats": [
"cs.CV"
],
"label": "Original Paper",
"paper_cat": "Diffusion AND Model",
"gt": "Bridge to Non-Barrier Communication: Gloss-Prompted Fine-grained Cued Speech Gesture Generation with Diffusion Model",
"main_content": "Introduction According to the World Health Organization (WHO), more than 5% of the global population (466 million) suffers from the hearing loss. As a predominant communication method for hearing-impaired people, Lip reading [Puviarasan and \u2217Corresponding Author: avrillliu@hkust-gz.edu.cn. Palanivel, 2011; Fernandez-Lopez et al., 2017] has a major defect of visual confusion. For instance, it struggles to differentiate pronunciations with similar labial shapes, such as [u] and [y], posing challenges for hearing-impaired individuals in accessing spoken language through conventional education. To tackle the limitations of lip reading, and to improve the reading skills of individuals with hearing impairments, in 1967, Cornett introduced the Cued Speech (CS) system [Cornett, 1967], which employs several hand codings (i.e., finger shapes and hand positions) to complement lip reading, providing a clear visual representation of all phonemes in spoken language [Puviarasan and Palanivel, 2011; Fernandez-Lopez et al., 2017]. For instance, in Mandarin Chinese CS (MCCS) [Liu and Feng, 2019] (see Fig. 1(a)), it utilizes five hand positions for encoding vowel groups and eight finger shapes for encoding consonant groups. With CS, individuals with hearing impairments can differentiate sounds that might appear similar when observed on lips by incorporating hand information. Another widely adopted communication method is Sign Language (SL) [Stokoe, 2005; Liddell and Johnson, 1989; Timothy, 2003]. It is crucial to emphasize that CS is not a visual language like SL; instead, it is a coding system of spoken language [Cornett, 1967]. In addition, studies indicate that CS can be learned much more quickly than SL [Reynolds, 2007]. Given that CS can effectively promote non-barrier communication, audio/text to CS gestures video generation draws researchers\u2019 attention. It should be noted that comparing to text, CS is more friendly and more easily adopted by the hearing impaired who are illiterate [Cox et al., 2002; Power et al., 2007]. The multi-modal CS gesture generation is a challenging task for the following reasons: 1) high requirement for finegrained and accurate gesture generation, as shown in Figure 1(a), where nuances in the hand\u2019s position and fingers\u2019 shape lead to quite different semantic meanings; 2) the limited size of CS datasets and expensive annotation cost of complicated fine-grained CS gestures. To address these challenges, we design a novel Gloss-Prompted Diffusion-based CS Gesture generation framework (GlossDiff). Specifically, we first propose a CS gloss, which is a direct motion instruction for bridging the gap between spoken language and CS gestures. It is automatically generated by LLM based on the encoding rule of CS in Figure 1(a). As shown in Figure 1(b), when expressing the word \u201ctree\u201d, which is pronounced as \u201c/\u00f9/ /u/\u201d in arXiv:2404.19277v1 [cs.CV] 30 Apr 2024 \fFigure 1: The details of CS rules and conversion process. (a) is the chart for the Mandarin Chinese Cued Speech (figure from [3]), where five different hand positions are used to code vowels, and eight finger shapes are used to code consonants in Mandarin Chinese. (b) shows the proposed instructional gloss, which directly links the text to the CS movements. 
As shown in Figure 1(b), when expressing the word "tree", which is pronounced as "/ù/ /u/" in Chinese, we generate the intermediate instruction text (i.e., gloss) describing the process of using CS gestures to express this word, i.e., "Stretch out two fingers apart without closing them and place the hand near the neck." Besides, we design a Gloss-Prompted Diffusion Model that can generate accurate hand and finger movements. Moreover, rhythm is a critical piece of paralinguistic information in spoken language. As CS is a coding system for spoken languages, we suggest that natural rhythm dynamics should also be considered a very important feature for CS's complete semantic expression. More specifically, rhythm here refers to the ability to generate multi-modal CS speech gesture movements (i.e., hand and finger movements) that match the phoneme durations and utterance prosody of speech. Unfortunately, previous works have not paid enough attention to this. To this end, we propose an Audio-driven Rhythmic Module (ARM) that considers the overall rhythm of the CS movements, aligning them with speech signals. We leverage the large-scale WavLM [Chen et al., 2022] to extract audio features, which we demonstrate outperform traditional MFCC features. We summarize our contributions as follows: 1) a novel GlossDiff framework that simultaneously generates fine-grained hand positions, finger movements, and lip reading in CS; specifically, we introduce a CS gloss, which establishes a direct link between text/audio and CS hand movements, enabling more specific prompts for accurate fine-grained CS gesture generation; 2) a new module, ARM, that improves the overall rhythm of the CS movements; 3) publication of the first multi-cuer large-scale Mandarin Chinese CS (MCCS) dataset, which contains four cuers (the people who perform CS are called cuers) and 4000 CS videos; 4) extensive experiments conducted on the MCCS dataset showing that the proposed GlossDiff achieves SOTA performance under different metrics; qualitative and ablation studies, as well as user studies, further verify the effectiveness of the proposed model.

2 Related Work
2.1 Cued Speech Generation
In prior work, early attempts at CS gesture generation [Duchnowski et al., 1998; Bailly et al., 2008] were mainly rule-based. Notably, in [Duchnowski et al., 1998], specific keywords were manually selected, along with low-context sentences [Rothauser, 1969], and manual templates for corresponding hand gestures were predefined. This processing involved CS recognition, followed by mapping the recognized text to the hand templates. However, this method relied heavily on hand-crafted designs, which constrained the expressiveness of CS gestures and increased the required manual effort. In [Bailly et al., 2008], a post-processing algorithm was introduced to refine synthesized hand gestures, including adjustments for hand rotation and translation. However, this approach required prior human knowledge to adapt the algorithm to new images, resulting in limited robustness. To the best of our knowledge, there is still a gap in research on end-to-end deep learning-based CS gesture generation.

2.2 Co-speech and Sign Language Generation
The generation of Co-speech gestures involves generating body movements corresponding to audio input. Previous studies mainly developed large speech-gesture datasets to learn how speech audio maps to human skeletons using deep learning, as in [Ao et al., 2022]. To make gestures more expressive, some methods use Generative Adversarial Networks (GANs) for more realistic results [Ginosar et al., 2019; Youngwoo et al., 2020].
Recently, diffusion models like DiffGesture [Zhu et al., 2023] effectively link audio and gestures while keeping temporal consistency, allowing for high-quality Co-speech gestures. However, Co-speech gesture generation focuses on fluency and style rather than fine-grained gesture accuracy; existing methods cannot generate accurate, subtle CS hand gestures. In the literature, there are several Sign Language (SL) generation methods: 1) the neural machine translation approach from [Stoll et al., 2020] treats SL generation as translation, using neural models to process SL text; 2) the motion graph method in [Stoll et al., 2020] uses motion graphics to build a directed graph from motion capture data for SL creation; 3) conditional generation methods, like GANs and VAEs, are also used for SL gestures; 4) some studies have introduced transformer-based models for SL, as mentioned in [Ben et al., 2020]. Despite these advancements in SL gesture generation, applying these methods to CS gestures has limitations. Firstly, CS gesture generation necessitates more precise methods to achieve complex fine-grained gesture generation, while SL gesture generation is more coarse-grained. Secondly, SL gestures are not related to lip reading and thus cannot match the speech rhythm and gesture-speech asynchrony characteristics [Liu et al., 2021; Liu and Liu, 2023] of CS gesture generation.

[Figure 2: The overall framework of the proposed GlossDiff, where (a), (b), (c) represent the Knowledge Infusion Module, Audio Rhythmic Module and Diffusion-based generation module, respectively.]

2.3 NeRF and Diffusion-based Gesture Generation
NeRF is a novel technique in 3D modeling which effectively creates highly detailed and photo-realistic static scenes from 2D images. Its application has been extended to generating life-like talking head models [Guo et al., 2021], demonstrating NeRF's capability in handling subtle facial movements and expressions. However, the application of NeRF to full-body gesture generation is relatively limited. Additionally, its high data and computation requirements further constrain its use in CS gesture generation. Currently, in the field of human gesture generation, diffusion models [Ho et al., 2020] are predominantly used in two main applications: generating comparably large body movements (e.g., human walking) [Tevet et al., 2022b; Zhang et al., 2022; Zhao et al., 2023; Ao et al., 2023] and generating poses in Co-speech scenarios [Ji et al., 2023; Zhi et al., 2023; Yang et al., 2023]. However, the existing approaches lack the capability to tackle fine-grained gesture generation. Additionally, they primarily focus on body poses without lip movements. Lastly, their diffusion models require extensive training data, which is not feasible given the limited data in our CS scenario.

3 Method
In this section, we provide a comprehensive description of our proposed method, GlossDiff, designed for rhythm-aware CS gesture generation, which seamlessly integrates domain-specific knowledge for CS generation. As shown in Figure 2, our GlossDiff framework consists of three primary components: the knowledge infusion module, the rhythmic module, and the Diffusion-based generation module.

3.1 Problem Formulation
Automatic CS gesture generation involves generating the corresponding landmark sequence of CS gestures $M^*$, given an audio signal $A$ and the text $T$.
In the task of automatic multi-modal CS gesture generation, the combined features of $A$, $T$, and the generated rhythmic information are input into the CS gesture generator. The final CS gestures ($M^*$), including lips, fingers, and hand positions, are obtained by minimizing

$\sum_{i=1}^{L} \| M^*_i - M_i \|$,  (1)

where $L$ represents the frame count of the current CS video. The ground-truth CS gesture landmarks $M_i$ in the $i$-th frame of the CS video are obtained with the ExPose method [Choutas et al., 2020]. Here, $M^*_i = \hat{M}_i + \widetilde{M}_i$, where $\hat{M} = G_D(T, A)$ are the generated semantic gesture landmarks representing the corresponding generated gesture in the $i$-th frame, and $G_D$ is the diffusion-based semantic gesture generator. Additionally, $\widetilde{M} = G_R(A)$ is the rhythmic information derived from the corresponding audio speech, with $G_R$ as the rhythm generator.
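As an illustration of the training objective in Eq. (1), the following is a minimal PyTorch sketch; it is our reading of the formula, not the authors' released code, and the tensor shapes (e.g., the number of landmarks) are assumptions.

```python
# Minimal sketch (our illustration of Eq. (1), not the authors' code):
# a per-frame landmark reconstruction loss summed over the L video frames.
import torch

def landmark_loss(pred: torch.Tensor, gt: torch.Tensor) -> torch.Tensor:
    # pred, gt: (L, K, 2) tensors of K 2D landmarks per frame (K is assumed).
    # Each frame contributes the Euclidean norm of its landmark residual.
    return (pred - gt).flatten(1).norm(dim=1).sum()  # sum_i ||M*_i - M_i||

loss = landmark_loss(torch.randn(75, 68, 2), torch.randn(75, 68, 2))
print(loss.item())
```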
The latent diffusion model is trained with the standard noise estimation loss [Ho et al., 2020] defined as: Lnoise = ||\u03f5 \u2212\u03f5\u03b8 (Zn, n, g, A) ||2 2, (5) where Zn is the latent CS gesture at each time step n. A is audio speech, and g is the generated gloss. \u03f5 is the ground truth noise and \u03f5\u03b8 is the noise predicted by latent diffusion model, where \u03b8 is the parameters of latent diffusion model. To inject the information of the gloss prompts into the diffusion network, we employ an adaptive instance normalization (AdaIN) layer [Huang and Belongie, 2017]. Specifically, we leverage the fine-tuned MotionCLIP gloss encoder Eg to convert the gloss prompt into a gloss embedding zg. Then, we learn a MLP network to map the gloss embedding zg to parameters that modify the per-channel mean and variance of the AdaIn layer. To train our Gloss-Prompted Diffusion Model, we employ classifier-free guidance as detailed in [Ho and Salimans, 2022]. Specifically, during training, we enable the diffusion model GD to master both the semantic conditional and unconditional distributions by randomly configuring g = \u2205. This action effectively deactivates the AdaIN layer with a probability of p during the training phase, which is set to 10% [Tevet et al., 2022b]. During inference, the anticipated noise is calculated using: \u03f5\u2217 n = p\u03f5\u03b8 (Zn, n, g, A) + (1 \u2212p)\u03f5\u03b8 (Zn, n, \u2205, A) . (6) After obtaining the predicted noise \u03f5\u2217 n, the model operates in a reverse step-wise manner over N time steps, updating a latent gesture sequence Zn at each time step n. It begins by generating a sequence of latent codes ZN \u223cN(0, I) and subsequently calculates a series of denoised sequences Zn through the iterative removal of the estimated noise \u03f5\u2217 n from Zn (n = N \u22121, . . . , 0). Z0 is the final generated CS gesture latent embedding through N reverse diffusion steps. Z0 is fed into a Transformer-based decoder [Petrovich et al., 2021] to generate semantic CS gesture motion \u02c6 M. 3.4 Audio-driven Rhythmic Module In CS gesture generation, it\u2019s not just the accurate positioning of the gesture that matters; the natural rhythm of gesture motion plays a crucial role. We believe that the audio speech signal contains not only the semantic information but also the rhythmic dynamics of CS, which significantly contributes to achieving visual and auditory coherence. To address this, we introduce a novel Audio-driven Rhythmic Module (ARM), designed to capture the rhythmic dynamics of gestures. This module employs three convolution layers as a rhythmic dynamics generator GR, further aligning the motion dynamics with the CS rhythm. Existing research (e.g., WavLm and AudioLDM) [Lebourdais et al., 2022; Liu et al., 2023] have shown that compared with MFCC features, audio features extracted by the large pre-trained model have a stronger expressive capability and can avoid information loss. Without loss of generality, in this work, we use the encoder of WavLM, denoted as EA to extract audio features to prevent information loss, thereby preserving richer and higher-dimensional rhythmic information. To handle the lip-hand synchronization issue [Liu et al., 2021] in CS, we reformulate the task as one of determining the motion magnitude for each frame within consecutive motion sequences. 
3.4 Audio-driven Rhythmic Module
In CS gesture generation, it is not just the accurate positioning of the gesture that matters; the natural rhythm of gesture motion plays a crucial role. We believe that the audio speech signal contains not only semantic information but also the rhythmic dynamics of CS, which significantly contributes to achieving visual and auditory coherence. To address this, we introduce a novel Audio-driven Rhythmic Module (ARM), designed to capture the rhythmic dynamics of gestures. This module employs three convolution layers as a rhythmic dynamics generator $G_R$, further aligning the motion dynamics with the CS rhythm. Existing research (e.g., WavLM and AudioLDM) [Lebourdais et al., 2022; Liu et al., 2023] has shown that, compared with MFCC features, audio features extracted by large pre-trained models have stronger expressive capability and can avoid information loss. Without loss of generality, in this work we use the encoder of WavLM, denoted $E_A$, to extract audio features, thereby preserving richer and higher-dimensional rhythmic information. To handle the lip-hand synchronization issue [Liu et al., 2021] in CS, we reformulate the task as one of determining the motion magnitude for each frame within consecutive motion sequences. Unlike methods that attempt to enforce perfect alignment between generated gestures and speech, our approach implicitly learns how to produce asynchronous gestures that correspond to the input speech. Rather than directly controlling the gestures of each individual frame, we focus on regulating the overall rhythm of a motion sequence. The loss function for the ARM is defined as

$\mathcal{L}_{rhythm} = \| \widetilde{M} - (M - \bar{M}) \|$,  (7)

where $\bar{M}$ represents the average motion within the set of generated motions $M$. The difference between $M$ and $\bar{M}$ quantifies the magnitude of hand and finger movement. The purpose of $\mathcal{L}_{rhythm}$ is to ensure that the generated $\widetilde{M} = G_R(E_A(A))$ maintains the natural offset relative to the mean gesture, where $E_A$ is the encoder of WavLM. This offset helps in generating motion dynamics for natural, non-mechanical movement without disrupting the semantics of the CS gesture. We demonstrate the efficacy regarding rhythm quality and naturalness with quantitative results in Sec. 4.3 and qualitative results in Sec. 4.4.

Novel Quantitative Rhythmic Metrics: In this work, for the first time, rhythm is investigated as an important paralinguistic feature to improve CS's communication efficacy. To capture the unique asynchronous dynamics between lip and hand movements in CS scenarios, we propose a novel metric, Gesture Audio Difference (GAD), to evaluate the rhythmic synchronization of the generated gestures. This metric is defined as

$\mathrm{GAD}(M, A) = \frac{1}{N} \sum_{i=1}^{N} \mathbb{1}\left[ \| U^M_i - U^A_i \|_1 < \tau \right]$,  (8)

where $M$ and $A$ represent the CS gesture and the audio speech, respectively. $N$ denotes the number of annotated temporal segments, which is equal for speech and gesture. $U_i$ refers to the middle time instant of a segment, indicating the specific moment when a gesture or speech event occurs. $\mathbb{1}$ is an indicator function, mapping elements satisfying $\| U^M_i - U^A_i \|_1 < \tau$ to one and all other elements to zero. Taking the asynchrony between audio speech and CS hand movements into consideration, we introduce a threshold $\tau$, which accounts for their alignment and is empirically determined based on a statistical study of the hand preceding time [Liu et al., 2020].

3.5 Training of the GlossDiff Framework
We employ a semantic loss to ascertain the semantic accuracy of the final generated gestures. Specifically,

$\mathcal{L}_{semantic} = 1 - \cos(Z_0, Z^*_0)$,  (9)

where $\cos(\cdot, \cdot)$ represents the cosine similarity, while $Z_0$ and $Z^*_0$ denote the final generated CS gesture latent embedding and the ground-truth CS gesture motions, respectively. Following the existing training procedure for denoising diffusion models, we optimize the following loss:

$\mathcal{L}_{total} = \alpha \mathcal{L}_{noise} + \beta \mathcal{L}_{semantic} + \gamma \mathcal{L}_{rhythm}$,  (10)

where $\alpha$ is the weight of $\mathcal{L}_{noise}$ (Equation (5)), $\beta$ is the weight of $\mathcal{L}_{semantic}$ (Equation (9)), and $\gamma$ is the weight of $\mathcal{L}_{rhythm}$ (Equation (7)).
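A minimal sketch of the GAD metric in Eq. (8), as we read it (our illustration; the variable names and the example threshold are assumptions, since the paper determines tau empirically):

```python
# Minimal sketch (our reading of Eq. (8), not the authors' released code):
# GAD counts how many gesture segments fall within tau seconds of their
# corresponding speech segments, using each segment's midpoint.
def gad(gesture_mids, audio_mids, tau=0.15):
    """gesture_mids, audio_mids: equal-length lists of segment midpoints (s).
    tau is the asynchrony threshold; 0.15 s here is an illustrative value."""
    assert len(gesture_mids) == len(audio_mids)
    hits = sum(abs(g - a) < tau for g, a in zip(gesture_mids, audio_mids))
    return hits / len(gesture_mids)

print(gad([0.50, 1.20, 2.10], [0.60, 1.22, 2.50]))  # -> 0.666...
```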
4 Experiments
4.1 MCCS Dataset
Previously, only two CS datasets were available for public access: one in French (https://zenodo.org/record/5554849#.ZBBCvOxBx8Y) [Liu et al., 2018], consisting of recordings of a single cuer delivering 238 sentences, and one in British English (https://zenodo.org/record/3464212#.ZBBAJuxBx8Y) [Liu et al., 2019], similarly featuring a single cuer reciting 97 sentences. To remedy the scarcity of Chinese CS data, we built in this work, for the first time, a large-scale Mandarin Chinese CS dataset that includes contributions from four CS cuers, called MCCS. We first selected 1000 text sentences following the principles below: (1) they cover common scenarios in daily life, including colloquial dialogues, more formal words, and written words; (2) the materials aim to cover the possible syllable combinations. All in all, our text album covers 23 main topics, 72 subtopics, and the 399 most commonly used Mandarin syllables. It comprises a total of 1000 sentences and 10,482 words, with an average of 10.5 words per sentence; the shortest sentence contains 4 words, while the longest has 25 words. Then, we recorded CS videos of each of the four cuers performing the 1000 sentences, resulting in 4000 sentences in total. All videos were recorded using either a camera or a mobile phone in landscape mode. The four cuers received systematic training to ensure they can perform Mandarin Chinese CS smoothly and accurately. Note that our dataset has been collected with the explicit consent of the individuals involved and is eligible for open sourcing.

4.2 Experimental Setup
During the training phase, we first pre-train the MotionCLIP and then follow an end-to-end pipeline to train the latent diffusion model. The experiments are implemented in PyTorch, with four A6000 GPU cards for model training. During the inference phase, we use the latent diffusion model to generate CS gestures. The training and test data are randomly split 4:1. The number of diffusion steps is 1000, and the training batch size is 128. The weights of the loss terms are set to α = 1, β = 0.2 and γ = 0.1.

Evaluation Metrics: The conventional evaluation metrics for generated gestures comprise four classes: Percentage of Correct Keypoints (PCK) [Yi and Deva, 2013], Fréchet Gesture Distance (FGD) [Youngwoo et al., 2020], Mean Absolute Joint Error (MAJE) [Youngwoo et al., 2020], and Mean Acceleration Difference (MAD) [Youngwoo et al., 2020]. In addition, to further measure the unique asynchronous dynamics between lip and hand movements in CS scenarios, we use the novel GAD metric described in Sec. 3.4 to evaluate the rhythmic synchronization of the generated gestures.

4.3 Quantitative Result and Analysis
Comparison with SOTA: We compare our approach with four recent gesture synthesis methods, i.e., Speech2Gesture [Ginosar et al., 2019], Gestures from Trimodal Context (GTC) [Youngwoo et al., 2020], HA2G [Bhattacharya et al., 2021], and DiffGesture [Zhu et al., 2023].

Table 1: Experiment results on the MCCS dataset compared with SOTA methods. "Gloss-Prompt" indicates the integration of the Gloss Knowledge Infusion Module. "WavLM" refers to the substitution of MFCC features with features from the pre-trained large-scale speech model WavLM. "Gloss-CLIP" denotes the incorporation of Gloss-based Motion CLIP Fine-tuning.
Methods | PCK (%)↑ | FGD↓ | MAJE (mm)↓ | MAD (mm/s²)↓ | GAD (%)↑
Speech2Gesture [Ginosar et al., 2019] | 36.84 | 19.25 | 61.26 | 3.97 | 66.8
GTC [Youngwoo et al., 2020] | 41.23 | 6.73 | 55.43 | 2.54 | 66.7
HA2G [Liu et al., 2022] | 43.51 | 4.07 | 46.78 | 2.29 | 67.2
DiffGesture [Zhu et al., 2023] | 47.58 | 3.50 | 48.52 | 2.12 | 69.9
Our GlossDiff (w/o Gloss-prompt) | 51.12 | 4.72 | 45.68 | 1.28 | 75.6
Our GlossDiff (w/o WavLM) | 52.97 | 4.54 | 42.31 | 0.71 | 78.3
Our GlossDiff (w/o Gloss-CLIP) | 53.41 | 4.31 | 43.52 | 0.65 | 79.1
Our GlossDiff | 54.23 | 3.92 | 39.28 | 0.52 | 79.4
We take DiffGesture as the SOTA method among these approaches, since it achieves the best results on the TED Gesture dataset [Youngwoo et al., 2019]. Table 1 provides a detailed comparison between our method and the previous methods on the MCCS dataset. Our GlossDiff gives the best results on the PCK, MAJE, MAD, and GAD metrics, most of them by a wide margin over the reference systems. The results demonstrate the higher quality of fine-grained gesture generation by our proposed system. The only exception is the FGD score, which slightly trails the SOTA method while surpassing all other reference methods. Notably, our method's PCK values are significantly higher than those of the other methods, showing its effectiveness in fine-grained generation. Moreover, our method excels in rhythm performance, achieving the highest GAD values; this superiority on the GAD metric demonstrates that our method can effectively capture the rhythm of CS gestures. Ablation Study We provide the ablation study for three modules in Table 1. The term "Gloss-Prompt" indicates the integration of the Gloss Knowledge Infusion Module. "WavLM" refers to using features extracted from the pre-trained large-scale speech model WavLM instead of conventional MFCC. "Gloss-CLIP" denotes the incorporation of Gloss-based Motion CLIP Fine-tuning. We can observe that the absence of any module leads to a decline in the performance metrics, demonstrating the efficacy of each module in our framework. Specifically, the absence of the Gloss-Prompt and Gloss-CLIP modules results in a decrease in PCK by 1.85% and 1.26%, respectively, highlighting their critical role in fine-grained generation. 4.4 Qualitative Result and Analysis Visualization of Generated Fine-grained CS Gesture Figure 3 shows fine-grained hand gestures generated with gloss prompts, where each row shows the detailed gloss for a different body part and its gesture sequence. We use arrows to indicate lip movement trends, red circles for finger shape transformations, and red stars for hand position shifts, including their movement directions. The first row shows the lips' contour expanding according to the gloss input. The second row emphasizes finger shape changes aligned with detailed finger gloss. The third row shows subtle hand position shifts, marked by red stars moving from near the mouth to the chin area, demonstrating our method's effectiveness in using detailed gloss to guide CS gesture generation. (Figure 3: Visualization of gestures generated from fine-grained gloss; best viewed zoomed in.) Distribution of Fine-grained Gesture Feature To visualize the generated CS gestures in feature space, we used t-SNE [van der Maaten and Hinton, 2008] for dimensionality reduction. We uniformly select frames from the generated CS sequences and extract the hand gesture features corresponding to the text. Recall that, as depicted in Figure 1, MCCS incorporates 8 distinct finger shapes to signify the 24 consonants of Mandarin Chinese, along with 5 hand positions to denote the 16 vowels. In the left part of Figure 4, the 8 clusters are well separated, with each cluster corresponding to a set of finger shapes (each color represents a different consonant group). Some clusters that lie very close together have similar finger shapes, such as shape8 and shape6, as well as shape2 and shape7. 
This visualization validates the effectiveness of our method in capturing the fine-grained semantics of CS hand and finger shapes. On the right side of Figure 4, we can see that different hand positions do differ in feature space, but there is more overlap among the clusters, meaning they are not as distinctly differentiated at the feature level as the finger shapes. Visualization of Generated CS Gestures Figure 5 compares the visualization results of our method with the SOTA method, DiffGesture. This comparison includes the gestures' corresponding audio, text, and ground-truth video frames. We highlight the corresponding phonemes in red and use red stars and circles to indicate hand locations and finger shapes, respectively. Our method shows a noticeable improvement in gesture accuracy, particularly in fine-grained details. For example, our index finger shape is more precise than the SOTA method's, as seen in the first column. In the second column, our method correctly places the hand beside the face, unlike the SOTA method's placement beside the eye. The fourth column illustrates our method's superior precision in thumb position and overall gesture alignment with the ground truth, showing greater adherence to CS rules and enhanced detail accuracy. (Figure 4: t-SNE clustering of the eight consonant groups corresponding to finger shapes and the five vowel groups corresponding to hand positions; each color represents a group of consonants or vowels. Figure 5: Visualization of the generated gestures compared with the SOTA method; best viewed zoomed in.) User Study We conduct a user study to evaluate the CS gestures generated by our method against the SOTA method and the ground truth. The study involved 10 groups of videos, each containing a ground-truth CS gesture video and videos generated by the current SOTA method (DiffGesture) and by our method (GlossDiff). All videos were randomly shuffled. Ten subjects trained in CS were asked to rate the CS gesture videos from three perspectives: accuracy, rhythm quality, and naturalness, each on a scale from 0 to 10 (higher is better). We calculated average scores and confidence intervals for each case. (Figure 6: User study results for the ground truth (GT), the current SOTA (DiffGesture), and our method (GlossDiff).) Figure 6 shows that our method surpasses the current SOTA DiffGesture on all three metrics, getting closer to the ground truth. This demonstrates our method's ability to produce more accurate and natural CS gestures, especially in rhythm quality, which we attribute to the proposed ARM. Our approach notably outperforms DiffGesture in accuracy, proving its effectiveness in fine-grained gesture generation. 5 Conclusion We introduced a novel GlossDiff framework that effectively generates fine-grained CS gesture sequences. We proposed a gloss knowledge infusion module and an audio-driven rhythmic module for accurate and natural CS gesture video generation. Additionally, we contributed the first large-scale MCCS dataset. Extensive experiments on MCCS demonstrate our approach's efficacy, surpassing current SOTA methods, and the qualitative experiments and ablation studies validate the effectiveness of the overall system as well as of each individual module. Future work aims to infuse CS video generation with prosody and emotion; Automatic Prompt Engineering (APE) is also a promising direction for improving gloss quality. 
6 Acknowledgement This work was supported by the National Natural Science Foundation of China (No. 62101351), and Guangzhou Municipal Science and Technology Project: Basic and Applied Basic research projects (No. 2024A04J4232).",
"additional_graph_info": {
"graph": [],
"node_feat": {
"Wentao Lei": [
{
"url": "http://arxiv.org/abs/2404.19277v1",
"title": "Bridge to Non-Barrier Communication: Gloss-Prompted Fine-grained Cued Speech Gesture Generation with Diffusion Model",
"abstract": "Cued Speech (CS) is an advanced visual phonetic encoding system that\nintegrates lip reading with hand codings, enabling people with hearing\nimpairments to communicate efficiently. CS video generation aims to produce\nspecific lip and gesture movements of CS from audio or text inputs. The main\nchallenge is that given limited CS data, we strive to simultaneously generate\nfine-grained hand and finger movements, as well as lip movements, meanwhile the\ntwo kinds of movements need to be asynchronously aligned. Existing CS\ngeneration methods are fragile and prone to poor performance due to\ntemplate-based statistical models and careful hand-crafted pre-processing to\nfit the models. Therefore, we propose a novel Gloss-prompted Diffusion-based CS\nGesture generation framework (called GlossDiff). Specifically, to integrate\nadditional linguistic rules knowledge into the model. we first introduce a\nbridging instruction called \\textbf{Gloss}, which is an automatically generated\ndescriptive text to establish a direct and more delicate semantic connection\nbetween spoken language and CS gestures. Moreover, we first suggest rhythm is\nan important paralinguistic feature for CS to improve the communication\nefficacy. Therefore, we propose a novel Audio-driven Rhythmic Module (ARM) to\nlearn rhythm that matches audio speech. Moreover, in this work, we design,\nrecord, and publish the first Chinese CS dataset with four CS cuers. Extensive\nexperiments demonstrate that our method quantitatively and qualitatively\noutperforms current state-of-the-art (SOTA) methods. We release the code and\ndata at https://glossdiff.github.io/.",
"authors": "Wentao Lei, Li Liu, Jun Wang",
"published": "2024-04-30",
"updated": "2024-04-30",
"primary_cat": "cs.CV",
"cats": [
"cs.CV"
],
"main_content": "Introduction According to the World Health Organization (WHO), more than 5% of the global population (466 million) suffers from the hearing loss. As a predominant communication method for hearing-impaired people, Lip reading [Puviarasan and \u2217Corresponding Author: avrillliu@hkust-gz.edu.cn. Palanivel, 2011; Fernandez-Lopez et al., 2017] has a major defect of visual confusion. For instance, it struggles to differentiate pronunciations with similar labial shapes, such as [u] and [y], posing challenges for hearing-impaired individuals in accessing spoken language through conventional education. To tackle the limitations of lip reading, and to improve the reading skills of individuals with hearing impairments, in 1967, Cornett introduced the Cued Speech (CS) system [Cornett, 1967], which employs several hand codings (i.e., finger shapes and hand positions) to complement lip reading, providing a clear visual representation of all phonemes in spoken language [Puviarasan and Palanivel, 2011; Fernandez-Lopez et al., 2017]. For instance, in Mandarin Chinese CS (MCCS) [Liu and Feng, 2019] (see Fig. 1(a)), it utilizes five hand positions for encoding vowel groups and eight finger shapes for encoding consonant groups. With CS, individuals with hearing impairments can differentiate sounds that might appear similar when observed on lips by incorporating hand information. Another widely adopted communication method is Sign Language (SL) [Stokoe, 2005; Liddell and Johnson, 1989; Timothy, 2003]. It is crucial to emphasize that CS is not a visual language like SL; instead, it is a coding system of spoken language [Cornett, 1967]. In addition, studies indicate that CS can be learned much more quickly than SL [Reynolds, 2007]. Given that CS can effectively promote non-barrier communication, audio/text to CS gestures video generation draws researchers\u2019 attention. It should be noted that comparing to text, CS is more friendly and more easily adopted by the hearing impaired who are illiterate [Cox et al., 2002; Power et al., 2007]. The multi-modal CS gesture generation is a challenging task for the following reasons: 1) high requirement for finegrained and accurate gesture generation, as shown in Figure 1(a), where nuances in the hand\u2019s position and fingers\u2019 shape lead to quite different semantic meanings; 2) the limited size of CS datasets and expensive annotation cost of complicated fine-grained CS gestures. To address these challenges, we design a novel Gloss-Prompted Diffusion-based CS Gesture generation framework (GlossDiff). Specifically, we first propose a CS gloss, which is a direct motion instruction for bridging the gap between spoken language and CS gestures. It is automatically generated by LLM based on the encoding rule of CS in Figure 1(a). As shown in Figure 1(b), when expressing the word \u201ctree\u201d, which is pronounced as \u201c/\u00f9/ /u/\u201d in arXiv:2404.19277v1 [cs.CV] 30 Apr 2024 \fFigure 1: The details of CS rules and conversion process. (a) is the chart for the Mandarin Chinese Cued Speech (figure from [3]), where five different hand positions are used to code vowels, and eight finger shapes are used to code consonants in Mandarin Chinese. (b) shows the proposed instructional gloss, which directly links the text to the CS movements. 
Chinese, we generate the intermediate instruction text (i.e., gloss) describing the process of using CS gestures to express this word, i.e., "Stretch out two fingers apart without closing them and place the hand near the neck." Besides, we design a Gloss-Prompted Diffusion Model that can generate accurate hand and finger movements. Moreover, rhythm is a critical piece of paralinguistic information in spoken language. Since CS is a coding system for spoken languages, we suggest that natural rhythm dynamics should also be considered a very important feature for CS's complete semantic expression. More specifically, rhythm here refers to the ability to generate multi-modal CS gesture movements (i.e., hand and finger movements) that match the phoneme durations and utterance prosody of the speech. Unfortunately, previous works have not paid enough attention to this. To this end, we propose an Audio-driven Rhythmic Module (ARM) that considers the overall rhythm of the CS movements in alignment with the speech signal. We leverage the large-scale WavLM [Chen et al., 2022] to extract audio features, which we demonstrate outperform traditional MFCC features. We summarize our contributions as follows: 1) A novel GlossDiff framework that simultaneously generates fine-grained hand positions, finger movements, and lip reading in CS. Specifically, we introduce a CS gloss, which establishes a direct link between text/audio and CS hand movements, enabling more specific prompts for accurate fine-grained CS gesture generation. 2) A new module, ARM, that improves the overall rhythm of the CS movements. 3) Publication of the first multi-cuer large-scale Mandarin Chinese CS (MCCS) dataset, which contains four cuers(1) and 4,000 CS videos. 4) Extensive experiments conducted on the MCCS dataset show that the proposed GlossDiff achieves SOTA performance under different metrics; the qualitative studies, ablation studies, and user studies further verify the effectiveness of the proposed model. ((1) People who perform CS are called cuers.) 2 Related work 2.1 Cued Speech Generation In prior work, early attempts at CS gesture generation [Duchnowski et al., 1998; Bailly et al., 2008] were mainly rule-based. Notably, in [Duchnowski et al., 1998], specific keywords were manually selected, along with low-context sentences [Rothauser, 1969], and manual templates for the corresponding hand gestures were predefined. The processing involved CS recognition, followed by mapping the recognized text to the hand templates. However, this method relied heavily on hand-crafted designs, which constrained the expressiveness of CS gestures and increased the required manual effort. In [Bailly et al., 2008], a post-processing algorithm was introduced to refine synthesized hand gestures, including adjustments for hand rotation and translation. However, this approach required prior human knowledge to adapt the algorithm to new images, resulting in limited robustness. To the best of our knowledge, there is still a gap in research on end-to-end deep-learning-based CS gesture generation. 2.2 Co-speech and Sign Language Generation Co-speech gesture generation involves producing body movements corresponding to audio input. Previous studies mainly developed large speech-gesture datasets to learn how speech audio maps to human skeletons using deep learning, as in [Ao et al., 2022]. To make gestures more expressive, some methods use Generative Adversarial Networks (GANs) for more realistic results [Ginosar et al., 2019; Youngwoo et al., 2020]. 
Recently, diffusion models like DiffGesture [Zhu et al., 2023] effectively link audio and gestures while maintaining temporal consistency, allowing for high-quality co-speech gestures. However, co-speech gesture generation focuses on fluency and style rather than fine-grained gesture accuracy, and existing methods cannot generate accurate, subtle CS hand gestures. In the literature, there are several Sign Language (SL) generation methods: 1) the Neural Machine Translation approach of [Stoll et al., 2020] treats SL generation as translation, using neural models to process SL text; 2) the Motion Graph method in [Stoll et al., 2020] builds a directed graph from motion capture data for SL creation; 3) conditional generation methods, such as GANs and VAEs, are also used for SL gestures; and 4) some works have introduced Transformer-based models for SL, as in [Ben et al., 2020]. Despite these advancements in SL gesture generation, applying these methods to CS gestures has limitations. Firstly, CS gesture generation necessitates more precise methods to achieve complex fine-grained gesture generation, while SL gesture generation is more coarse-grained. Secondly, SL gestures are not related to lip reading and thus cannot match the speech rhythm and the gesture-speech asynchrony characteristics [Liu et al., 2021; Liu and Liu, 2023] of CS gesture generation. 2.3 NeRF and Diffusion-based Gesture Generation NeRF is a novel technique in 3D modeling that effectively creates highly detailed and photo-realistic static scenes from 2D images. Its application has been extended to generating life-like talking-head models [Guo et al., 2021], demonstrating NeRF's capability in handling subtle facial movements and expressions. However, the application of NeRF to full-body gesture generation is relatively limited, and its high data and computation requirements further constrain its use in CS gesture generation. Currently, in the field of human gesture generation, diffusion models [Ho et al., 2020] are predominantly used in two main applications: generating comparatively large body movements (e.g., human walking) [Tevet et al., 2022b; Zhang et al., 2022; Zhao et al., 2023; Ao et al., 2023] and generating poses in co-speech scenarios [Ji et al., 2023; Zhi et al., 2023; Yang et al., 2023]. However, the existing approaches lack the capability to tackle fine-grained gesture generation; they primarily focus on body poses without lip movements; and their diffusion models require extensive training data, which is not feasible given the limited data in our CS scenario. 3 Method In this section, we provide a comprehensive description of our proposed method, GlossDiff, designed for rhythm-aware CS gesture generation, which seamlessly integrates domain-specific knowledge for CS generation. (Figure 2: The overall framework of the proposed GlossDiff, where (a), (b), and (c) represent the Knowledge Infusion Module, the Audio-driven Rhythmic Module, and the Diffusion-based generation module, respectively.) As shown in Figure 2, our GlossDiff framework consists of three primary components: the knowledge infusion module, the rhythmic module, and the diffusion-based generation module. 3.1 Problem Formulation Automatic CS gesture generation involves generating the corresponding landmark sequence of the CS gesture $M^*$, given an audio signal $A$ and the text $T$. 
In the task of automatic multimodal CS gesture generation, the combined features of $A$, $T$, and the generated rhythmic information are input into the CS gesture generator. The final CS gestures $M^*$, including lips, fingers, and hand positions, are obtained by minimizing: $\sum_{i=1}^{L} \lVert M^*_i - M_i \rVert$, (1) where $L$ represents the frame count of the current CS video. The ground-truth CS gesture landmarks $M_i$ in the i-th frame of the CS video are obtained with the ExPose method [Choutas et al., 2020]. Here $M^*_i = \hat{M}_i + \widetilde{M}_i$, where $\hat{M} = G_D(T, A)$ are the generated semantic gesture landmarks representing the generated gesture in the i-th frame, and $G_D$ is the diffusion-based semantic gesture generator. Additionally, $\widetilde{M} = G_R(A)$ is the rhythmic information derived from the corresponding audio speech, with $G_R$ as the rhythm generator. 3.2 Knowledge Infusion Module The primary objective of the knowledge infusion module is to transform the spoken language text $T$ (i.e., the speech transcription) into direct text instructions (i.e., gloss; see Figure 1(b)) that describe the corresponding fine-grained CS motions. To achieve this, we leverage an LLM, i.e., ChatGPT-4 [OpenAI et al., 2023], with a prompt engineering approach to infuse the encoding rules of Chinese CS [Liu and Feng, 2019] into our framework as follows: $g = \mathrm{LLM}(T, P)$, (2) where $P$ is our designed prompt based on CS domain knowledge (i.e., prior transformation rules of CS based on [Liu and Feng, 2019]) and $T$ is the input text. Ultimately, this process transforms the indirectly semantic-related text into directly semantic-related gloss. 3.3 Diffusion-based Generation Module Gloss-based Motion CLIP Fine-tuning MotionCLIP [Tevet et al., 2022a] is a large-scale multimodal model specifically designed for generating general motion gestures. To obtain an accurate feature embedding of the CS gloss, we use MotionCLIP as our pre-trained model and fine-tune it with the generated CS gloss (introduced in Subsection 3.2) and the paired CS gestures. For the fine-tuning stage, we adopt CLIP-style contrastive learning [Radford et al., 2021] to fine-tune the encoders on CS data. Consider a batch of paired CS gesture motion and gloss embeddings, denoted $B = \{(z^m_i, z^g_i)\}_{i=1}^{B}$, where $B$ is the batch size; $E_m$ and $E_g$ are the corresponding MotionCLIP encoders for the motion sequence and the gloss, with $Z^m = E_m(M)$ and $Z^g = E_g(g)$. The goal of training is to maximize the similarity between the paired $z^m_i$ and $z^g_i$ in the batch while minimizing the similarity of the incorrect pairs $(z^m_i, z^g_j)_{i \neq j}$. A symmetric cross-entropy (CE) loss $\mathcal{L}_{CE}$ is optimized over these similarity scores. Formally, the loss is: $\mathcal{L}_{\mathrm{CLIP}} = \mathbb{E}_{B \sim D}\big[\mathcal{L}_{CE}(y(z^m_i), p^m(z^m_i)) + \mathcal{L}_{CE}(y(z^g_j), p^g(z^g_j))\big]$, (3) where $y$ specifies the true correspondence between the gestures $z^m_i$ and the glosses $z^g_j$ in the training batch $B$: $y = 1$ if they are paired and $y = 0$ otherwise. $p^m$ is defined as: $p^m(z^m_i) = \frac{\exp(z^m_i \cdot z^g_i / \eta)}{\sum_{j=1}^{B} \exp(z^m_i \cdot z^g_j / \eta)}$, (4) where $\eta$ is the softmax temperature, and $p^g(z^g_j)$ follows the same computation. Gloss-Prompted Diffusion Model To generate CS gesture videos, we propose a Gloss-Prompted Diffusion Model. More precisely, the semantic hand gesture generator $G_D$ is designed based on the latent diffusion model [Rombach et al., 2022], which applies diffusion and denoising steps in a pre-trained latent space. 
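As an aside on the fine-tuning objective just described, the CLIP-style loss of Eqs. (3)-(4) is the standard symmetric contrastive (InfoNCE-style) loss, so a minimal PyTorch sketch may help. The embeddings are stand-ins for $E_m(M)$ and $E_g(g)$, `eta` is the softmax temperature, and none of these names come from the authors' released code:

```python
import torch
import torch.nn.functional as F

def clip_finetune_loss(z_motion: torch.Tensor, z_gloss: torch.Tensor,
                       eta: float = 0.07) -> torch.Tensor:
    """Symmetric contrastive loss over a batch of (motion, gloss) pairs
    (Eqs. 3-4): matched pairs lie on the diagonal of the similarity matrix."""
    z_motion = F.normalize(z_motion, dim=-1)
    z_gloss = F.normalize(z_gloss, dim=-1)
    logits = z_motion @ z_gloss.t() / eta           # B x B similarities / temperature
    targets = torch.arange(z_motion.size(0))        # pair i matches gloss i
    loss_m = F.cross_entropy(logits, targets)       # motion -> gloss direction
    loss_g = F.cross_entropy(logits.t(), targets)   # gloss -> motion direction
    return 0.5 * (loss_m + loss_g)

# Hypothetical usage with random embeddings standing in for E_m(M) and E_g(g)
loss = clip_finetune_loss(torch.randn(8, 512), torch.randn(8, 512))
```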
The latent diffusion model is trained with the standard noise estimation loss [Ho et al., 2020]: $\mathcal{L}_{\text{noise}} = \lVert \epsilon - \epsilon_\theta(Z_n, n, g, A) \rVert_2^2$, (5) where $Z_n$ is the latent CS gesture at time step $n$, $A$ is the audio speech, and $g$ is the generated gloss. $\epsilon$ is the ground-truth noise and $\epsilon_\theta$ is the noise predicted by the latent diffusion model, whose parameters are $\theta$. To inject the information of the gloss prompts into the diffusion network, we employ an adaptive instance normalization (AdaIN) layer [Huang and Belongie, 2017]. Specifically, we use the fine-tuned MotionCLIP gloss encoder $E_g$ to convert the gloss prompt into a gloss embedding $z^g$, and then learn an MLP that maps $z^g$ to parameters modifying the per-channel mean and variance of the AdaIN layer. To train our Gloss-Prompted Diffusion Model, we employ classifier-free guidance as detailed in [Ho and Salimans, 2022]. Specifically, during training we enable the diffusion model $G_D$ to master both the semantic conditional and the unconditional distributions by randomly setting $g = \emptyset$, which effectively deactivates the AdaIN layer; the probability of doing so during training is set to 10% [Tevet et al., 2022b]. During inference, the anticipated noise is calculated as: $\epsilon^*_n = p\,\epsilon_\theta(Z_n, n, g, A) + (1-p)\,\epsilon_\theta(Z_n, n, \emptyset, A)$. (6) After obtaining the predicted noise $\epsilon^*_n$, the model operates in a reverse step-wise manner over $N$ time steps, updating the latent gesture sequence $Z_n$ at each time step $n$. It begins by sampling latent codes $Z_N \sim \mathcal{N}(0, I)$ and then computes a series of denoised sequences $Z_n$ by iteratively removing the estimated noise $\epsilon^*_n$ from $Z_n$ ($n = N-1, \ldots, 0$). $Z_0$, the final generated CS gesture latent embedding after the $N$ reverse diffusion steps, is fed into a Transformer-based decoder [Petrovich et al., 2021] to generate the semantic CS gesture motion $\hat{M}$. 3.4 Audio-driven Rhythmic Module In CS gesture generation, it is not only the accurate positioning of the gesture that matters; the natural rhythm of the gesture motion also plays a crucial role. We believe that the audio speech signal contains not only semantic information but also the rhythmic dynamics of CS, which significantly contributes to achieving visual and auditory coherence. To this end, we introduce a novel Audio-driven Rhythmic Module (ARM) designed to capture the rhythmic dynamics of gestures. This module employs three convolution layers as a rhythmic dynamics generator $G_R$, further aligning the motion dynamics with the CS rhythm. Existing research (e.g., WavLM and AudioLDM) [Lebourdais et al., 2022; Liu et al., 2023] has shown that, compared with MFCC features, audio features extracted by large pre-trained models have stronger expressive capability and can avoid information loss. Without loss of generality, in this work we use the encoder of WavLM, denoted $E_A$, to extract audio features, thereby preserving richer and higher-dimensional rhythmic information. To handle the lip-hand synchronization issue [Liu et al., 2021] in CS, we reformulate the task as one of determining the motion magnitude of each frame within consecutive motion sequences. 
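Before the rhythmic module is detailed further, a brief aside on Eq. (6): the classifier-free guidance step at inference blends a gloss-conditional and an unconditional noise estimate. The sketch below assumes `eps_model` is a hypothetical handle for the trained noise predictor $\epsilon_\theta$ and that passing `None` for the gloss stands for the null condition $\emptyset$ (i.e., the AdaIN layer disabled); it illustrates the blending only, not the authors' implementation:

```python
import torch

def guided_noise(eps_model, z_n: torch.Tensor, n: int,
                 gloss_emb, audio_feat: torch.Tensor, p: float) -> torch.Tensor:
    """Classifier-free guidance (Eq. 6): blend the gloss-conditional and
    unconditional noise predictions with guidance weight p."""
    eps_cond = eps_model(z_n, n, gloss_emb, audio_feat)  # eps_theta(Z_n, n, g, A)
    eps_uncond = eps_model(z_n, n, None, audio_feat)     # eps_theta(Z_n, n, null, A)
    return p * eps_cond + (1.0 - p) * eps_uncond
```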
Unlike methods that attempt to enforce perfect alignment between generated gestures and speech, our approach implicitly learns to produce asynchronous gestures that correspond to the input speech. Rather than directly controlling the gesture of each individual frame, we focus on regulating the overall rhythm of a motion sequence. The loss function for the ARM is defined as: $\mathcal{L}_{\text{rhythm}} = \lVert \widetilde{M} - (M - \bar{M}) \rVert$, (7) where $\bar{M}$ represents the average motion within the set of generated motions $M$. The difference between $M$ and $\bar{M}$ quantifies the magnitude of hand and finger movement. The purpose of $\mathcal{L}_{\text{rhythm}}$ is to ensure that the generated $\widetilde{M} = G_R(E_A(A))$, where $E_A$ is the encoder of WavLM, maintains a natural offset relative to the mean gesture. This offset helps generate motion dynamics for natural, non-mechanical movement without disrupting the semantics of the CS gesture. We demonstrate the efficacy regarding rhythm quality and naturalness with quantitative results in Sec. 4.3 and qualitative results in Sec. 4.4. Novel Quantitative Rhythmic Metrics In this work, for the first time, rhythm is investigated as an important paralinguistic feature to improve the communication efficacy of CS. To capture the unique asynchronous dynamics between lip and hand movements in CS scenarios, we propose a novel metric, Gesture Audio Difference (GAD), to evaluate the rhythmic synchronization of the generated gestures. This metric is defined as follows: $\mathrm{GAD}(M, A) = \frac{1}{N}\sum_{i=1}^{N} \mathbb{1}\big[\lVert U^M_i - U^A_i \rVert_1 < \tau\big]$, (8) where $M$ and $A$ represent the CS gesture and the audio speech, respectively. The term $N$ denotes the number of annotated temporal segments, which is equal for speech and gesture. The variable $U_i$ refers to the middle time instant of a segment, indicating the specific moment when a gesture or speech event occurs. The function $\mathbb{1}$ is an indicator function, mapping elements that satisfy $\lVert U^M_i - U^A_i \rVert_1 < \tau$ to one and all other elements to zero. Taking the asynchrony between audio speech and CS hand movements into consideration, we introduce a threshold $\tau$, which ensures their alignment and is empirically determined from a statistical study of the hand preceding time [Liu et al., 2020]. 3.5 Training of GlossDiff Framework We employ a semantic loss to ensure the semantic accuracy of the final generated gestures. Specifically, $\mathcal{L}_{\text{semantic}} = 1 - \cos(Z_0, Z^*_0)$, (9) where $\cos(\cdot, \cdot)$ denotes the cosine similarity, while $Z_0$ and $Z^*_0$ denote the final generated CS gesture latent embedding and the ground-truth CS gesture motions, respectively. Following the standard training procedure for denoising diffusion models, we optimize: $\mathcal{L}_{\text{total}} = \alpha\mathcal{L}_{\text{noise}} + \beta\mathcal{L}_{\text{semantic}} + \gamma\mathcal{L}_{\text{rhythm}}$, (10) where $\alpha$, $\beta$, and $\gamma$ are the weights of $\mathcal{L}_{\text{noise}}$ (Equation (5)), $\mathcal{L}_{\text{semantic}}$ (Equation (9)), and $\mathcal{L}_{\text{rhythm}}$ (Equation (7)), respectively. 4 Experiments 4.1 MCCS Dataset Previously, only two CS datasets were publicly available: one in French(2) [Liu et al., 2018], consisting of recordings of a single cuer delivering 238 sentences, and one in British English(3) [Liu et al., 2019], similarly featuring a single cuer reciting 97 sentences. To remedy the scarcity of Chinese CS data, we build in this work, for the first time, a large-scale Mandarin Chinese CS dataset with contributions from four CS cuers, called MCCS. 
We first select 1,000 text sentences following the principles below: (1) they cover common scenarios in daily life, including colloquial dialogue, more formal wording, and written language; (2) the materials aim to cover as many syllable combinations as possible. In all, our text corpus covers 23 main topics, 72 subtopics, and the 399 most commonly used Mandarin syllables. It comprises a total of 1,000 sentences and 10,482 words, with an average of 10.5 words per sentence; the shortest sentence contains 4 words, while the longest has 25. We then recorded CS videos of each of the four cuers performing the 1,000 sentences, resulting in 4,000 recorded sentences in total. All videos were recorded using either a camera or a mobile phone in landscape mode. The four cuers received systematic training to ensure they could perform Mandarin Chinese CS smoothly and accurately. Note that our dataset was collected with the explicit consent of the individuals involved and is eligible for open-source release. 4.2 Experimental Setup During the training phase, we first pre-train the motion CLIP and then follow an end-to-end pipeline to train the latent diffusion model. The experiments are implemented in PyTorch, with four A6000 GPUs for model training. During the inference phase, we use the latent diffusion model to generate CS gestures. The training and test data are randomly split 4:1. The number of diffusion steps is 1000, and the training batch size is 128. The loss weights are set to α = 1, β = 0.2, and γ = 0.1. Evaluation Metrics The conventional evaluation metrics for generated gestures comprise four measures: Percentage of Correct Keypoints (PCK) [Yi and Deva, 2013], Fréchet Gesture Distance (FGD) [Youngwoo et al., 2020], Mean Absolute Joint Error (MAJE) [Youngwoo et al., 2020], and Mean Acceleration Difference (MAD) [Youngwoo et al., 2020]. In addition, to measure the unique asynchronous dynamics between lip and hand movements in CS scenarios, we use the novel GAD metric described in Sec. 3.4 to evaluate the rhythmic synchronization of the generated gestures. 4.3 Quantitative Result and Analysis Comparison with SOTA We compare our approach with four recent gesture synthesis methods: Speech2Gesture [Ginosar et al., 2019], Gestures from Trimodal Context (GTC) [Youngwoo et al., 2020], HA2G [Liu et al., 2022], and DiffGesture [Zhu et al., 2023]. (Footnotes: 2: https://zenodo.org/record/5554849#.ZBBCvOxBx8Y; 3: https://zenodo.org/record/3464212#.ZBBAJuxBx8Y) Table 1: Experiment results on the MCCS dataset compared with SOTA methods. "Gloss-Prompt" indicates the integration of the Gloss Knowledge Infusion Module; "WavLM" refers to substituting MFCC features with features from the pre-trained large-scale speech model WavLM; "Gloss-CLIP" denotes the incorporation of Gloss-based Motion CLIP Fine-tuning.

| Methods | PCK (%) ↑ | FGD ↓ | MAJE (mm) ↓ | MAD (mm/s²) ↓ | GAD (%) ↑ |
| --- | --- | --- | --- | --- | --- |
| Speech2Gesture [Ginosar et al., 2019] | 36.84 | 19.25 | 61.26 | 3.97 | 66.8 |
| GTC [Youngwoo et al., 2020] | 41.23 | 6.73 | 55.43 | 2.54 | 66.7 |
| HA2G [Liu et al., 2022] | 43.51 | 4.07 | 46.78 | 2.29 | 67.2 |
| DiffGesture [Zhu et al., 2023] | 47.58 | 3.50 | 48.52 | 2.12 | 69.9 |
| Our GlossDiff (w/o Gloss-Prompt) | 51.12 | 4.72 | 45.68 | 1.28 | 75.6 |
| Our GlossDiff (w/o WavLM) | 52.97 | 4.54 | 42.31 | 0.71 | 78.3 |
| Our GlossDiff (w/o Gloss-CLIP) | 53.41 | 4.31 | 43.52 | 0.65 | 79.1 |
| Our GlossDiff | 54.23 | 3.92 | 39.28 | 0.52 | 79.4 |
We take DiffGesture as the SOTA method among these approaches, since it achieves the best results on the TED Gesture dataset [Youngwoo et al., 2019]. Table 1 provides a detailed comparison between our method and the previous methods on the MCCS dataset. Our GlossDiff gives the best results on the PCK, MAJE, MAD, and GAD metrics, most of them by a wide margin over the reference systems. The results demonstrate the higher quality of fine-grained gesture generation by our proposed system. The only exception is the FGD score, which slightly trails the SOTA method while surpassing all other reference methods. Notably, our method's PCK values are significantly higher than those of the other methods, showing its effectiveness in fine-grained generation. Moreover, our method excels in rhythm performance, achieving the highest GAD values; this superiority on the GAD metric demonstrates that our method can effectively capture the rhythm of CS gestures. Ablation Study We provide the ablation study for three modules in Table 1. The term "Gloss-Prompt" indicates the integration of the Gloss Knowledge Infusion Module. "WavLM" refers to using features extracted from the pre-trained large-scale speech model WavLM instead of conventional MFCC. "Gloss-CLIP" denotes the incorporation of Gloss-based Motion CLIP Fine-tuning. We can observe that the absence of any module leads to a decline in the performance metrics, demonstrating the efficacy of each module in our framework. Specifically, the absence of the Gloss-Prompt and Gloss-CLIP modules results in a decrease in PCK by 1.85% and 1.26%, respectively, highlighting their critical role in fine-grained generation. 4.4 Qualitative Result and Analysis Visualization of Generated Fine-grained CS Gesture Figure 3 shows fine-grained hand gestures generated with gloss prompts, where each row shows the detailed gloss for a different body part and its gesture sequence. We use arrows to indicate lip movement trends, red circles for finger shape transformations, and red stars for hand position shifts, including their movement directions. The first row shows the lips' contour expanding according to the gloss input. The second row emphasizes finger shape changes aligned with detailed finger gloss. The third row shows subtle hand position shifts, marked by red stars moving from near the mouth to the chin area, demonstrating our method's effectiveness in using detailed gloss to guide CS gesture generation. (Figure 3: Visualization of gestures generated from fine-grained gloss; best viewed zoomed in.) Distribution of Fine-grained Gesture Feature To visualize the generated CS gestures in feature space, we used t-SNE [van der Maaten and Hinton, 2008] for dimensionality reduction. We uniformly select frames from the generated CS sequences and extract the hand gesture features corresponding to the text. Recall that, as depicted in Figure 1, MCCS incorporates 8 distinct finger shapes to signify the 24 consonants of Mandarin Chinese, along with 5 hand positions to denote the 16 vowels. In the left part of Figure 4, the 8 clusters are well separated, with each cluster corresponding to a set of finger shapes (each color represents a different consonant group). Some clusters that lie very close together have similar finger shapes, such as shape8 and shape6, as well as shape2 and shape7. 
This visualization validates the effectiveness of our method in capturing the fine-grained semantics of CS hand and finger shapes. On the right side of Figure 4, we can see that different hand positions do differ in feature space, but there is more overlap among the clusters, meaning they are not as distinctly differentiated at the feature level as the finger shapes. Visualization of Generated CS Gestures Figure 5 compares the visualization results of our method with the SOTA method, DiffGesture. This comparison includes the gestures' corresponding audio, text, and ground-truth video frames. We highlight the corresponding phonemes in red and use red stars and circles to indicate hand locations and finger shapes, respectively. Our method shows a noticeable improvement in gesture accuracy, particularly in fine-grained details. For example, our index finger shape is more precise than the SOTA method's, as seen in the first column. In the second column, our method correctly places the hand beside the face, unlike the SOTA method's placement beside the eye. The fourth column illustrates our method's superior precision in thumb position and overall gesture alignment with the ground truth, showing greater adherence to CS rules and enhanced detail accuracy. (Figure 4: t-SNE clustering of the eight consonant groups corresponding to finger shapes and the five vowel groups corresponding to hand positions; each color represents a group of consonants or vowels. Figure 5: Visualization of the generated gestures compared with the SOTA method; best viewed zoomed in.) User Study We conduct a user study to evaluate the CS gestures generated by our method against the SOTA method and the ground truth. The study involved 10 groups of videos, each containing a ground-truth CS gesture video and videos generated by the current SOTA method (DiffGesture) and by our method (GlossDiff). All videos were randomly shuffled. Ten subjects trained in CS were asked to rate the CS gesture videos from three perspectives: accuracy, rhythm quality, and naturalness, each on a scale from 0 to 10 (higher is better). We calculated average scores and confidence intervals for each case. (Figure 6: User study results for the ground truth (GT), the current SOTA (DiffGesture), and our method (GlossDiff).) Figure 6 shows that our method surpasses the current SOTA DiffGesture on all three metrics, getting closer to the ground truth. This demonstrates our method's ability to produce more accurate and natural CS gestures, especially in rhythm quality, which we attribute to the proposed ARM. Our approach notably outperforms DiffGesture in accuracy, proving its effectiveness in fine-grained gesture generation. 5 Conclusion We introduced a novel GlossDiff framework that effectively generates fine-grained CS gesture sequences. We proposed a gloss knowledge infusion module and an audio-driven rhythmic module for accurate and natural CS gesture video generation. Additionally, we contributed the first large-scale MCCS dataset. Extensive experiments on MCCS demonstrate our approach's efficacy, surpassing current SOTA methods, and the qualitative experiments and ablation studies validate the effectiveness of the overall system as well as of each individual module. Future work aims to infuse CS video generation with prosody and emotion; Automatic Prompt Engineering (APE) is also a promising direction for improving gloss quality. 
6 Acknowledgement This work was supported by the National Natural Science Foundation of China (No. 62101351), and Guangzhou Municipal Science and Technology Project: Basic and Applied Basic research projects (No. 2024A04J4232)."
},
{
"url": "http://arxiv.org/abs/2303.01707v1",
"title": "Spatio-Temporal Structure Consistency for Semi-supervised Medical Image Classification",
"abstract": "Intelligent medical diagnosis has shown remarkable progress based on the\nlarge-scale datasets with precise annotations. However, fewer labeled images\nare available due to significantly expensive cost for annotating data by\nexperts. To fully exploit the easily available unlabeled data, we propose a\nnovel Spatio-Temporal Structure Consistent (STSC) learning framework.\nSpecifically, a gram matrix is derived to combine the spatial structure\nconsistency and temporal structure consistency together. This gram matrix\ncaptures the structural similarity among the representations of different\ntraining samples. At the spatial level, our framework explicitly enforces the\nconsistency of structural similarity among different samples under\nperturbations. At the temporal level, we consider the consistency of the\nstructural similarity in different training iterations by digging out the\nstable sub-structures in a relation graph. Experiments on two medical image\ndatasets (i.e., ISIC 2018 challenge and ChestX-ray14) show that our method\noutperforms state-of-the-art SSL methods. Furthermore, extensive qualitative\nanalysis on the Gram matrices and heatmaps by Grad-CAM are presented to\nvalidate the effectiveness of our method.",
"authors": "Wentao Lei, Lei Liu, Li Liu",
"published": "2023-03-03",
"updated": "2023-03-03",
"primary_cat": "eess.IV",
"cats": [
"eess.IV",
"cs.CV"
],
"main_content": "INTRODUCTION Deep learning has achieved success in medical image analysis with large-scale datasets and manual annotations. Studies [1,2] have shown that larger labeled datasets lead to better performance. However, obtaining accurate labels for medical images is costly and time-consuming due to the need for clinical expertise, making it challenging to build a large-scale dataset with accurate labels [3\u20135]. To alleviate the over-dependence on annotations, SSL method is designed to improve performance by leveraging abundant unlabeled data with limited labeled data. SSL approaches can be roughly categorized into two categories: self-training [6] and consistency regularization [7]. The former generates arti\ufb01cial pseudo labels from the predictions of * Corresponding author: avrillliu@hkust-gz.edu.cn. \u2020 Equal contribution. unlabeled data and then adds them into the training set [8]. The latter improves the availability of unlabeled data based on the predictions consistency between different modi\ufb01ed versions of the same input [9\u201311]. As one of the key methods of SSL, consistency regularization aims to capture the relationships for both labeled and unlabeled data in the feature space. For instance, [12] enforced the prediction consistency of the same sample under different perturbations to learn a uni\ufb01ed feature space. [13] emphasized the consistency of the spatial relationships among different samples via the relation information of unlabeled data. Above-mentioned approaches mainly investigate the spatial structure relationships to utilize unlabeled samples. However, there is a lacking of effective exploration for temporal consistency, which could maintain stable spatial relationships along with training. Medical professionals frequently consult prior samples to aid in their diagnostic decision-making. The value of these samples increases with higher con\ufb01dence. Drawing inspiration from this clinical practice, we propose a novel spatiotemporal structure consistent (STSC) semi-supervised framework (see Fig. 1) to explore spatial and temporal structural relationships among different samples simultaneously. In particular, a case-level gram matrix is derived to describe the similarity among the different samples in the representation space. Then we transform the gram matrix into an adjacency matrix to represent the graph structure of training samples. During training, stable spatial structure can be obtained by encouraging consistency of gram matrix under different perturbations on the inputs. At the late training stage, we present a Temporal Sub-structure Consistency (TSC) method to maintain the temporal consistency of the structural relationships, which further captures the stable sub-structures in a relation graph along with training. More discriminative semantic information can be learned from the relationships of unlabeled data guided by spatio-temporal structure consistency. In summary, the main contributions are: (1) A novel STSC semi-supervised learning framework is proposed to ef\ufb01ciently leverage the unlabeled data, which reduces the requirement of labeled data on both single-label and multi-label tasks. (2) We propose a Temporal Sub-structure Consistency (TSC) method to explore the stable sub-structures in a relation graph. It can effectively capture the stable sample structure arXiv:2303.01707v1 [eess.IV] 3 Mar 2023 \falong with training. 
(3) Experiments on two public medical image datasets (i.e., ISIC 2018 and ChestX-ray14) demonstrate the superior performance of our approach compared with state-of-the-art (SOTA) methods. 2. METHOD 2.1. Spatio-Temporal Structure Consistent Framework Preliminaries. We consider a basic image classification task with c categories. Under the semi-supervised setting, we have a labeled set $D_L = \{(x_i, y_i)\}_{i=1}^{N}$ with $N$ labeled samples, where $y_i$ represents the label of $x_i$, and an unlabeled set $D_U = \{x_i\}_{i=N+1}^{N+M}$ with $M$ unlabeled samples. The target is to learn a deep model $f(\cdot, \theta)$, parameterized by $\theta$, from both the labeled and the unlabeled training sets. Typically, consistency-regularization-based semi-supervised learning aims to optimize the following objective function: $\min_{\theta} \sum_{(x,y)\in D_L} \mathcal{L}_s(f(x, \theta), y) + \lambda \sum_{x \in D_U \cup D_L} \mathcal{L}_u(f(\eta, x, \theta), f(\eta', x, \theta'))$, (1) where $\mathcal{L}_s$ denotes the supervised loss on the labeled set $D_L$, and $\mathcal{L}_u$ is the unsupervised consistency loss that forces consistent predictions for the same inputs under different perturbations. Here, we exploit a teacher-student structure with the same network architecture, parameterized by $\theta$ and $\theta'$, respectively; $\eta$ and $\eta'$ denote the different perturbations applied to the same input images, and $\lambda$ is a hyper-parameter that controls the trade-off between the supervised and unsupervised losses. Teacher-Student Model. The teacher model is updated as the exponential moving average (EMA) of the weights $\theta$ of the student model: the teacher parameters are updated as $\theta'_t = \alpha\theta'_{t-1} + (1-\alpha)\theta_t$ at training iteration $t$, where $\alpha$ is a hyper-parameter controlling the update rate. Our framework preserves the conventional individual consistency mechanism to force consistent predictions of the teacher and student models [7] under different perturbations, which optimizes the following sample-level loss: $\mathcal{L}_c = \sum_{x_i \in D_U \cup D_L} \mathbb{E}_{\eta',\eta} \lVert f(x_i, \theta', \eta') - f(x_i, \theta, \eta) \rVert_2^2$. (2) 2.2. Spatial and Temporal Structure Consistency In this section, we first derive a case-level Gram matrix [14] to capture the structural relations among different samples. Given a mini-batch of $B$ samples, let $F^l \in \mathbb{R}^{B \times H \times W \times C}$ denote the activation maps at layer $l$, where $H$ and $W$ are the spatial sizes of the feature map and $C$ is the number of channels. The feature map $F^l$ is first reshaped to $D^l \in \mathbb{R}^{B \times HWC}$. The case-wise Gram matrix $M^l \in \mathbb{R}^{B \times B}$ is then computed as $M^l = D^l \cdot (D^l)^T$, where $M_{ij}$ is the inner product between $D^l(i)$ and $D^l(j)$, measuring the similarity between the activations of the i-th and j-th samples in the mini-batch. The final sample relation matrix $R^l$ is obtained by L2-normalizing each row $M^l_i$ of $M^l$: $R^l = \left[\frac{M^l_1}{\lVert M^l_1 \rVert_2}, \ldots, \frac{M^l_B}{\lVert M^l_B \rVert_2}\right]^T$. (3) The spatial structure consistency regularizes the relation matrix $R^l$ to be stable under different perturbations, preserving the spatial semantic relations among samples. The proposed spatial structure consistency loss is defined as: $\mathcal{L}_{sc} = \sum_{X \in \{D_U \cup D_L\}} \frac{1}{B} \lVert R^l(X; \theta, \eta) - R^l(X; \theta', \eta') \rVert_2^2$. (4) 
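To make the case-wise relation matrix and the spatial consistency term of Eqs. (3)-(4) concrete, here is a minimal PyTorch sketch; the feature tensors are placeholders for the student and teacher activations under the two perturbations, not the authors' released code:

```python
import torch
import torch.nn.functional as F

def relation_matrix(feat: torch.Tensor) -> torch.Tensor:
    """Case-wise Gram matrix: flatten B x H x W x C activations to B x HWC,
    take inner products between samples (M^l = D^l (D^l)^T), then
    L2-normalize each row to obtain R^l (Eq. 3)."""
    d = feat.reshape(feat.size(0), -1)     # D^l in R^{B x HWC}
    gram = d @ d.t()                       # M^l: pairwise sample similarities
    return F.normalize(gram, p=2, dim=1)   # row-normalized relation matrix R^l

def spatial_consistency(feat_student: torch.Tensor,
                        feat_teacher: torch.Tensor) -> torch.Tensor:
    """L_sc (Eq. 4): keep the relation structure stable across perturbations."""
    r_s = relation_matrix(feat_student)
    r_t = relation_matrix(feat_teacher)
    return ((r_s - r_t) ** 2).sum() / feat_student.size(0)
```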
2.3. Temporal Sub-structure Consistency To further explore the structure of the training samples at the temporal level, we propose a Temporal Sub-structure Consistency that maintains stable spatial information based on a graph. Concretely, we first obtain an adjacency matrix $A$ by binarizing the Gram matrix $R$: a threshold $\tau$ is used to filter out sample edges with weak semantic relationships. The element $A_{(i,j)}$ is set to one if its corresponding entry in the Gram matrix is larger than $\tau$, indicating that $x_i$ and $x_j$ have a close semantic relationship; otherwise $A_{(i,j)}$ is set to zero (a short code sketch of this step is given just before the results below): $A_{(i,j)} = \begin{cases} 1, & R_{(i,j)} \ge \tau \\ 0, & R_{(i,j)} < \tau \end{cases}$ (5) A graph $G(X, E)$ is generated from the adjacency matrix $A$, where $X$ is the set of vertices (samples) and $E$ is the set of edges (the edge $E(x_i, x_j)$ exists when $A_{(i,j)} = 1$). During training, we identify stable sub-structures $s(\bar{X})$ in $G$ in which all elements of $\bar{X}$ are connected at both iteration $t$ and iteration $t+1$. The collection of stable sub-structures is denoted $S = \{s_i\}_{i=1}^{k}$, where $s_i$ is the i-th stable sub-structure and $k$ is the number of stable sub-structures. The temporal structure consistency is designed to keep the sub-Gram matrix $R^l$ stable across training iterations, capturing the temporal relationships among different samples. It is defined as: $\mathcal{L}_{tc} = \sum_{i=1}^{k} \sum_{X \in \{s_i\}} \frac{1}{B} \lVert R^l_t(X; \theta, \eta) - R^l_{t-1}(X; \theta, \eta) \rVert_2^2$. (6) Finally, the total objective of the STSC semi-supervised framework is: $\mathcal{L} = \mathcal{L}_s + \lambda \mathcal{L}_u$, with $\mathcal{L}_u = \mathcal{L}_c + \beta \mathcal{L}_{sc} + \gamma \mathcal{L}_{tc}$, (7) where the supervised and unsupervised objectives are denoted $\mathcal{L}_s$ and $\mathcal{L}_u$, respectively. The unsupervised objective is composed of the individual consistency $\mathcal{L}_c$ and the spatial and temporal consistencies $\mathcal{L}_{sc}$ and $\mathcal{L}_{tc}$. To balance $\mathcal{L}_c$, $\mathcal{L}_{sc}$, and $\mathcal{L}_{tc}$, we set two hyperparameters $\beta$ and $\gamma$, which are generally set to 1, and $\lambda$ is the trade-off weight between the supervised and unsupervised losses. (Fig. 1: The pipeline of the proposed STSC. The teacher model is updated as the EMA of the student model; spatio-temporal consistency consists of two parts, the spatial relation loss $\mathcal{L}_{sc}$ and the temporal relation loss $\mathcal{L}_{tc}$.) 3. EXPERIMENT 3.1. Experimental Setup Datasets. The experiments are conducted on two public medical datasets: ChestX-ray14 [15] and ISIC 2018 Skin Lesion Analysis [16,17]. For fair comparison, the ISIC 2018 data are randomly split into training, validation, and test sets (7:1:2) following [13]. The ChestX-ray14 dataset is split into training, validation, and test sets (7:1:2) following [18]. Implementation Details. For the ISIC 2018 dataset the batch size is 64, and for the ChestX-ray14 dataset it is 48. Adam is used with an initial learning rate of 1e-4, decayed by a power of 0.9 after every epoch. All experiments are conducted on 4 Tesla V100 GPUs. The evaluation metrics include AUC, Accuracy, Sensitivity, and Specificity. 3.2. Result and Analysis ISIC 2018 Dataset. In Tab. 1, we compare the performance of the proposed method with previous approaches using 20% labeled data on the ISIC 2018 dataset. The upper-bound performance is obtained as a baseline by training a supervised model on 100% labeled data. The self-training method obtains higher specificity than the other approaches, benefiting from negative samples. 
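As referenced in Sec. 2.3 above, here is a short sketch of the graph construction behind Eqs. (5)-(6). Treating the "stable sub-structures" as the edges present at two consecutive iterations is one plausible reading and an assumption on our part, not the authors' exact procedure:

```python
import torch

def binarize(relation: torch.Tensor, tau: float) -> torch.Tensor:
    """Eq. 5: keep only edges whose relation value reaches the threshold tau."""
    return (relation >= tau).float()

def stable_edges(rel_t: torch.Tensor, rel_prev: torch.Tensor, tau: float) -> torch.Tensor:
    """Adjacency of edges present at both iteration t-1 and t; the temporal
    loss L_tc (Eq. 6) is then computed over the samples these edges connect."""
    return binarize(rel_t, tau) * binarize(rel_prev, tau)
```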
Compared with the supervised baseline, our method improves the AUC, accuracy, and sensitivity by about 3.58%, 1.78%, and 7.10%, respectively. Notably, our approach achieves improvements on all metrics over the previous SOTA method SRC-MT by enforcing the consistency of the spatial and temporal relationships among different samples, indicating the effectiveness of our method. Table 1: Comparison results on the ISIC 2018 dataset (ratios and metrics in %).

| Method | DL | DU | Acc | Sen | Spec | AUC | F1 |
| --- | --- | --- | --- | --- | --- | --- | --- |
| Upper | 100 | 0 | 95.10 | 75.20 | 94.94 | 95.43 | 70.13 |
| Baseline | 20 | 0 | 92.17 | 65.50 | 91.83 | 90.15 | 52.03 |
| ST [8] | 20 | 80 | 92.37 | 67.63 | 93.31 | 90.58 | 54.51 |
| DCGAN [19] | 20 | 80 | 92.27 | 67.72 | 92.56 | 91.28 | 54.10 |
| TCSE [20] | 20 | 80 | 92.35 | 68.17 | 92.51 | 92.24 | 58.44 |
| TE [7] | 20 | 80 | 92.26 | 69.81 | 92.55 | 92.70 | 59.33 |
| MT [12] | 20 | 80 | 92.48 | 69.75 | 92.20 | 92.96 | 59.10 |
| SRC-MT [13] | 20 | 80 | 92.54 | 71.47 | 92.72 | 93.58 | 60.68 |
| STSC (ours) | 20 | 80 | 93.95 | 72.60 | 92.94 | 93.73 | 60.84 |

ChestX-Ray14 Dataset. In Tab. 2, we report the performance of our method and previous approaches under different percentages of labeled data on the ChestX-Ray14 dataset. Table 2: Comparison results (AUC, %) on the ChestX-Ray14 dataset.

| Ratio | 2% | 5% | 10% | 15% | 20% |
| --- | --- | --- | --- | --- | --- |
| GraphX [18] | 53 | 58 | 63 | 68 | 78 |
| SRC-MT | 66.95 | 72.29 | 75.28 | 77.76 | 79.23 |
| STSC (ours) | 65.37 | 71.49 | 76.21 | 78.31 | 79.45 |

GraphXNET is the baseline model and achieves 78% AUC using 20% labeled data; however, its performance exhibits a large variance with respect to different labeled-data percentages, whereas our method is more stable across them. SRC-MT is the previous SOTA method. The AUC of STSC is lower than that of SRC-MT when the labeled-data settings are 2% and 5%, but shows superior performance as the labeled-data percentage increases; this phenomenon may be attributed to the labeled data benefiting from a more reliable relation structure. Feature Maps. Visualizations by Grad-CAM [21] are presented in Fig. 2, where the first two rows are for the ISIC 2018 dataset. The attention regions of our model are consistent with the lesion area according to clinical experience; e.g., the attention maps in the first two rows nearly overlap the lesion area. Besides, in the last two rows, the attention map highlights the chest area with obvious symptoms. Table 3: Performance on the ISIC 2018 dataset under different labeled-data ratios (all in %).

| Method | DL | DU | Acc. | Sen. | Spec. | AUC | F1 |
| --- | --- | --- | --- | --- | --- | --- | --- |
| Baseline | 10 | 0 | 87.45 | 64.22 | 89.88 | 87.04 | 44.43 |
| SRC-MT | 10 | 90 | 89.30 | 66.29 | 90.47 | 90.31 | 50.02 |
| STSC (ours) | 10 | 90 | 91.12 | 68.36 | 91.48 | 91.29 | 52.17 |
| Baseline | 20 | 0 | 87.47 | 66.77 | 90.29 | 86.15 | 52.03 |
| SRC-MT | 20 | 80 | 92.54 | 71.47 | 92.72 | 93.58 | 60.68 |
| STSC (ours) | 20 | 80 | 93.95 | 72.60 | 92.94 | 93.73 | 60.84 |
| Baseline | 30 | 0 | 88.05 | 72.79 | 90.59 | 88.78 | 57.83 |
| SRC-MT | 30 | 70 | 93.11 | 74.59 | 92.85 | 94.27 | 63.54 |
| STSC (ours) | 30 | 70 | 94.12 | 73.46 | 92.75 | 93.69 | 63.23 |
| Baseline | 50 | 0 | 90.94 | 75.28 | 92.80 | 90.29 | 60.27 |
| SRC-MT | 50 | 50 | 93.56 | 76.79 | 93.60 | 93.38 | 65.74 |
| STSC (ours) | 50 | 50 | 93.68 | 76.78 | 93.47 | 94.53 | 66.24 |

Table 4: Ablation of different consistency terms on the ISIC 2018 dataset (metrics in %).

| Lc | Lsc | Ltc | Acc. | Sen. | Spec. | AUC |
| --- | --- | --- | --- | --- | --- | --- |
| × | × | × | 91.66 | 65.39 | 92.01 | 85.38 |
| √ | × | × | 92.45 | 67.88 | 92.08 | 87.24 |
| × | √ | × | 91.67 | 65.25 | 91.44 | 84.60 |
| × | × | √ | 92.18 | 66.04 | 91.29 | 86.83 |
| √ | √ | × | 92.26 | 65.95 | 91.70 | 89.18 |
| √ | × | √ | 92.10 | 63.81 | 89.50 | 88.57 |
| × | √ | √ | 92.44 | 66.91 | 92.09 | 86.23 |
| √ | √ | √ | 93.95 | 72.60 | 92.94 | 93.73 |

Relation Matrices. As shown in Fig. 3, we visualize heatmaps of the distance between the student and teacher relation matrices. 
It is shown that the heatmap values become much smaller along with training, which indicates that our method can learn discriminative features with stable relation structures under perturbations. 3.3. Ablation Studies Different Percentages of Labeled Data. In Tab. 3, we study the influence of different percentages of labeled data. Our method obtains superior performance over the baseline and the SOTA method with 10%, 20%, 30%, and 50% labeled data. Besides, our method achieves higher AUC and accuracy with only 20% labeled data, which is close to the upper bound trained with 100% labeled data. The model trained with 20% labeled data shows performance comparable to the supervised baseline model trained with 50% labeled data, which further validates the effectiveness of our approach. Different Consistency Terms. The ablation studies for different consistency terms are shown in Tab. 4. The model performs poorly when using only the supervised loss term. Each unsupervised loss term can significantly improve the final performance, especially the AUC. Besides, different combinations of unsupervised loss terms work well; for example, both the temporal and the spatial consistency loss obtain a higher AUC. When all unsupervised loss terms are combined, the model achieves the best performance on all metrics. (Fig. 2: Visualizations on ISIC 2018 and ChestX-Ray14 with Grad-CAM and Grad-CAM++. Fig. 3: Distance of the relation matrices at different epochs: (a) epoch 0, (b) epoch 30, (c) epoch 60.) 4. CONCLUSION This work studies semi-supervised medical image classification to alleviate the need for labeled data when training a DNN. We propose a novel STSC framework that considers the stability of the samples' spatial and temporal structure. Extensive experiments are conducted on two public benchmark medical image classification datasets to demonstrate the effectiveness of our method on both single-label and multi-label medical image classification tasks. Moreover, visualization results are presented to validate the effectiveness of our method. 5. ACKNOWLEDGMENTS This work is supported by the National Natural Science Foundation of China (No. 62101351) and the GuangDong Basic and Applied Basic Research Foundation (No. 2020A1515110376)."
}
]
},
"edge_feat": {}
}
}
title_31K_G/test_title_long_2404.19292v1.json ADDED (diff too large to render; see raw diff)
title_31K_G/test_title_long_2404.19346v1.json ADDED (diff too large to render; see raw diff)
title_31K_G/test_title_long_2404.19382v1.json ADDED (diff too large to render; see raw diff)
title_31K_G/test_title_long_2404.19394v1.json ADDED (diff too large to render; see raw diff)
title_31K_G/test_title_long_2404.19409v1.json ADDED (diff too large to render; see raw diff)
title_31K_G/test_title_long_2404.19420v1.json ADDED (diff too large to render; see raw diff)
title_31K_G/test_title_long_2404.19429v1.json ADDED (diff too large to render; see raw diff)
title_31K_G/test_title_long_2404.19438v2.json ADDED (diff too large to render; see raw diff)
title_31K_G/test_title_long_2404.19453v1.json ADDED (diff too large to render; see raw diff)
title_31K_G/test_title_long_2404.19479v1.json ADDED (diff too large to render; see raw diff)
title_31K_G/test_title_long_2404.19482v1.json ADDED (diff too large to render; see raw diff)
title_31K_G/test_title_long_2404.19486v1.json ADDED (diff too large to render; see raw diff)
title_31K_G/test_title_long_2404.19508v1.json ADDED (diff too large to render; see raw diff)
title_31K_G/test_title_long_2404.19509v1.json ADDED (diff too large to render; see raw diff)
title_31K_G/test_title_long_2404.19531v1.json ADDED (diff too large to render; see raw diff)
title_31K_G/test_title_long_2404.19533v1.json ADDED (diff too large to render; see raw diff)
title_31K_G/test_title_long_2404.19543v1.json ADDED (diff too large to render; see raw diff)
title_31K_G/test_title_long_2404.19553v1.json ADDED (diff too large to render; see raw diff)
title_31K_G/test_title_long_2404.19563v1.json ADDED (diff too large to render; see raw diff)
title_31K_G/test_title_long_2404.19597v1.json ADDED (diff too large to render; see raw diff)
title_31K_G/test_title_long_2404.19639v1.json ADDED
@@ -0,0 +1,50 @@
{
"url": "http://arxiv.org/abs/2404.19639v1",
"title": "ESP-Zero: Unsupervised enhancement of zero-shot classification for Extremely Sparse Point cloud",
"abstract": "In recent years, zero-shot learning has attracted the focus of many\nresearchers, due to its flexibility and generality. Many approaches have been\nproposed to achieve the zero-shot classification of the point clouds for 3D\nobject understanding, following the schema of CLIP. However, in the real world,\nthe point clouds could be extremely sparse, dramatically limiting the\neffectiveness of the 3D point cloud encoders, and resulting in the misalignment\nof point cloud features and text embeddings. To the point cloud encoders to fit\nthe extremely sparse point clouds without re-running the pre-training procedure\nwhich could be time-consuming and expensive, in this work, we propose an\nunsupervised model adaptation approach to enhance the point cloud encoder for\nthe extremely sparse point clouds. We propose a novel fused-cross attention\nlayer that expands the pre-trained self-attention layer with additional\nlearnable tokens and attention blocks, which effectively modifies the point\ncloud features while maintaining the alignment between point cloud features and\ntext embeddings. We also propose a complementary learning-based\nself-distillation schema that encourages the modified features to be pulled\napart from the irrelevant text embeddings without overfitting the feature space\nto the observed text embeddings. Extensive experiments demonstrate that the\nproposed approach effectively increases the zero-shot capability on extremely\nsparse point clouds, and overwhelms other state-of-the-art model adaptation\napproaches.",
"authors": "Jiayi Han, Zidi Cao, Weibo Zheng, Xiangguo Zhou, Xiangjian He, Yuanfang Zhang, Daisen Wei",
"published": "2024-04-30",
"updated": "2024-04-30",
"primary_cat": "cs.CV",
"cats": [
"cs.CV"
],
"label": "Original Paper",
"paper_cat": "Distillation",
"gt": "ESP-Zero: Unsupervised enhancement of zero-shot classification for Extremely Sparse Point cloud",
"main_content": "Introduction 3D point cloud processing with deep-learning approaches has been explored deeply in recent years, due to its growing applications in VR, AR, autonomous driving, embodied intelligence, and so on. With the improvements in self-attention techniques, the performance of 3D point cloud processing has significantly increased. Despite the great success of 3D point cloud processing, researchers mainly focus on dense point clouds, while the processing of extremely sparse point clouds has not been fully explored, especially in zero-shot settings. As the density of points significantly influences the shape representation of the input object, the models trained with dense point clouds may lack generalization ability on those extremely sparse point clouds. Inspired by the pre-training schema in NLP tasks, some 3D object processing approaches with the pre-training step involve the training procedure of sparse point clouds. For example, PointBERT [1] groups the input point cloud into \u2217\u2020: Corresponding author arXiv:2404.19639v1 [cs.CV] 30 Apr 2024 \fRunning Title for Header Figure 1: Zero-shot classification accuracy on the ModelNet-40 dataset. Our approach dramatically increases the zero-shot capability on the extremely sparse point clouds. clusters via furthest point sampling (FPS), encodes each cluster into an embedding, masks 25% \u223c45% clusters out of the embeddings, and then recovers those masked embeddings. This training approach results in the training on the sparse point clouds. Similarly, I2P-MAE [2] also proposes to mask out 80% of points during the pre-training procedure. However, with an initial number of 8096 points, the down-sampled point cloud still has more than 1k points which is still very dense and may not be applicable in real usages. A potential solution for enhancing sparse point cloud classification is to complete the sparse point clouds, named point completion. However, researchers of point cloud completion mainly focus on completing the sparse point clouds with approximately 2k points to 16k points [3, 4, 5, 6]. Some of the point-completion approaches validated their performance on datasets with more sparse samples, for example, the KITTI dataset [7]. However, these methods are only validated on a limited number of categories (usually only \u201dvehicle\u201d is validated). Figure 2: The proposed approach enhances the zero-shot ability on the extremely sparse point clouds. To enhance the zero-shot classification capability on the extremely sparse point clouds, in this work, we propose a novel self-distillation approach that leverages the information of the dense point cloud and the text embedding of the observed categories. The proposed approach is based on a well-trained point cloud transformer, whose latent space is aligned with the latent space of text embeddings. For the alignment of point cloud feature space and text embedding space could be reserved, we introduce an additive tuning strategy. We freeze the weights of the pre-trained point cloud transformer and attach fused cross-attention (FCA) layers to it for model optimization. Each FCA layer consists of a batch of learnable tokens, a learnable self-attention (SA) block, and a frozen self-attention (SA) block that belongs to the pre-trained network. Following the cross-attention mechanism, the learnable tokens are first fed into the learnable SA block. Inspired by VPT [8], the modified tokens are concatenated to the encoded point tokens. 
The frozen SA block further encodes the merged tokens, and the learnable tokens are discarded in the output. Despite the small number of learnable tokens, FCA could effectively enhance the pre-trained model. 2 \fRunning Title for Header We also propose a novel complementary learning-based self-distillation approach to optimize the modified model for both visible and invisible categories in the training set. Different from normal distillation approaches that utilize the pseudo labels or modified output distributions of the input, we adopt complementary labels to specify the categories to which the input does not belong, and suppress the similarity of the 3D object representations and the text embeddings of the complementary labels. This approach allows the encoded sparse representation to be pulled apart from the unmatched text embeddings rather than fitting the most similar text embedding, which reduces the risk of overfitting the observed text embeddings during training. Our main contributions could be listed as follows. 1. To the best of our knowledge, this work proposes to enhance the zero-shot classification of extremely sparse point clouds for the first time. 2. We propose a fused cross-attention layer that introduces the refinement of frozen self-attention blocks, which effectively modifies the encoded representation space of the pre-trained model while maintaining its zero-shot capability. 3. We propose a complimentary learning-based self-distillation approach that pulls the sparse point cloud representation away from the unmatched label text embeddings, which decreases the potential overfitting. 2 Related works 2.1 Point cloud processing Since the success of PointNet [9], processing 3D objects in the form of point clouds has become a natural solution. PointNet++ [10] introduces the grouping layers in point cloud processing, which allows the deep models to leverage the neighboring information like convolution networks. Afterward, many researchers propose to utilize well-designed kernels to improve their performances. For example, KP-conv [11] utilizes a spherical neighborhood and a kernel function to determine the weight of each neighboring point during convolution. Edge-conv [12] utilizes the relative position to fetch the integrating weight of neighboring points. [13, 14] leverage local geometry for point cloud classification. [15] proposes multi-scale FPS to fuse point cloud features. [16] transforms points into a Hough space, and utilizes CNN-based networks to encode the points. Augmentations are also beneficial for point cloud processing [17]. In recent years, many approaches have introduced self-attention blocks to point cloud processing. Point Transformer [18] and Point Cloud Transformer [19] introduce the self-attention mechanism to point cloud processing for the first time. They utilize similar structures like PointNet++ and modify the feature aggregation with self-attention layers. Meanwhile, YOGO [20] proposes to group and embed the points once that first group and encode multiple sub-structures of the point cloud into embeddings, then calculate the cross-attention of grouped embeddings and all the points. This strategy effectively reduces the cost of SA-based point cloud processors but sacrifices some precision. To decrease the cost of self-attention in point cloud processing, PointFormer [21] proposes to utilize Linformer to replace the standard self-attention mechanism. 
SD-SA [22] validates the efficient self-attention mechanisms for the point cloud transformer and proposes to modify self-attention with skeleton decomposition to reduce the computational cost of the point cloud transformer. Inspired by NLP tasks, PointBERT [1] and I2P-MAE [2] introduce BERT-styled pre-training in 3D processing and achieve great success. They mask some tokens of the point cloud and train to recover those masked tokens. The fine-tuning procedure is followed to fetch the downstream capabilities. [23] utilizes a novel pipeline that leverages neural rendering and 2D images to align features of point clouds and images for effective model pre-training. 2.2 Zero-shot classification of point clouds Inspired by the success of CLIP [24], many approaches are proposed to classify point clouds in a zero-shot manner. PointCLIP [25] directly renders the point clouds to depth images and adopts a pre-trained CLIP model to classify the rendered images with prompts like \u201cthis is the depth map of \u201c{category}\u201d. CLIP2Point [26] proposes to render the initial 3D mesh and point cloud into images and depth maps, encode them by pre-trained CLIP models, and fine-tune the model to align their features. [27] aligns the seen semantics and point cloud features, and leverages the unlabeled object to address the downstream issues like domain adaption. ULIP and ULIP-2 [28, 29] further introduce the text embeddings in the training phase. The point cloud features are aligned to both CLIP features and corresponding text embeddings of the ground truth labels. 3 \fRunning Title for Header Figure 3: The overall framework of the proposed approach. The Dense point cloud is down-sampled to a sparse point cloud, grouped by KNN, and encoded to point cloud tokens. The initial model is modified with a trainable FCA block when processing the tokens of the sparse point cloud. We then distill the model with infoNCE loss and CL loss with the assistance of text embeddings. 2.3 Point cloud completion Point completion network (PCN) [30] is one of the most classical point cloud completion approaches, which introduces both coarse and fine-grained supervision during training. Many approaches pay attention to refining the details of the completed point clouds. PMP-Net [4] proposed to iteratively refine the sparse point cloud. After each refinement step, the modified point cloud is further refined, until it reaches the maximum refinement steps. PMP-Net++ [5] further enhances the PMP-Net with the self-attention mechanism. SeedFormer [6] introduces patch seeds as shape representation which leverages both global information (of the partial point cloud) and local geometry. Similarly, [31] also leverages the geometry transformation to recover the missing point. USSPA [32] introduces a shape-preserving autoencoder for point cloud completion without supervision. Adapted loss functions are also introduced to enhance the point cloud completion [3]. 3 Method 3.1 Overall Architecture As depicted in Fig. 3, the overall framework consists of a pre-trained point cloud transformer with grouping and encoding layers, encoding layers of the point embeddings, and a final projecting layer that projects the point cloud representation to the latent space shared with text embeddings. The pre-trained network could be seen as a teacher model. To modify the pre-trained model, we expand the pre-trained encoding layers to an FCA block. 
A trainable FCA block consists of additional learnable tokens, an SA block, and the corresponding frozen encoding layer. By modifying the trainable parts in the FCA block, the model could be enhanced for extremely sparse point clouds. During training, the dense point cloud is down-sampled to a sparse point cloud. The dense point cloud is directly encoded with the pre-trained model to obtain a standard representation. The model modified by the FCA block encodes the sparse point cloud. The inner product of text embeddings and the dense representations is utilized to fetch the pseudo supervision for the sparse representation to learn from. During testing, the text embedding with the largest similarity to the sparse representation indicates the final prediction. Note that since the pre-trained weights are fixed, and the FCA block could be directly rolled back to the initial encoding layer, therefore we could avoid catastrophic forgetting. 3.2 Fused Cross-attention (FCA) The pre-trained point-cloud transformer is well-aligned with text embedding, which is crucial to zero-shot classification. To fine-tune the pre-trained model without eliminating its zero-shot capability, we introduce fused cross-attention to each transformer block of the point-cloud transformer. 4 \fRunning Title for Header Figure 4: The structure of FCA. The learnable tokens are firstly processed by an SA block, then combined with the encoded point cloud tokens, pass through an encoder block, and only output the point cloud tokens. For each transformer block, we add m learnable tokens which are randomly initialized. We first forward the learnable tokens with a self-attention block, then fuse them to the encoded point cloud tokens. Specifically, denote the encoded point cloud tokens as T = {ti}n i=1 and the learnable tokens as P = {pj}m j=1. The learnable tokens are first processed as in Eq. 1: ( \u02c6 P = P Wq(P Wk)T \u03bb PWv P \u2032 = \u02c6 PWP , (1) in which SA and FFN represent the self-attention and the feed-forward network, respectively. P \u2032 is then fused with T via self-attention as follows: ( \u02c6 T = [T ;P \u2032]Wq([W ;P \u2032]Wk)T \u03bb [T; P \u2032]Wv T \u2032 = \u02c6 TWT , (2) After the fusion procedure, the modified point tokens are fed forward, while the learnable tokens are discarded so that the total number of tokens is consistent. The final output of each FCA layer is as T \u2032 0:|T |. 3.3 Self-distillation with complementary learning To enhance the classification ability of the pre-trained point cloud transformer, we propose a self-distillation schema to optimize the learnable parameters (the learnable tokens, q, k, v projections, and FFN) in FCA. The pre-trained point cloud transformer without FCA is utilized to encode the dense point cloud (with 2048 points) to form its standard representation which is well-aligned to the text embedding of the prompt template of the corresponding category. The text embedding could be obtained by the corresponding text encoder, in our case, a text transformer aligned with the pre-trained point cloud transformer. Pseudo label for Self-distillation Denote the standard representation as R \u2208R1,C, and the text embeddings of the prompt templates (as shown in Fig. 3) of involved categories as E \u2208RN,C, in which N, C represent the number of involved categories and the dimensionality of the latent space, respectively. The complementary labels of the input objects could be obtained by the similarity of the standard representation and text embeddings. 
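For concreteness, Eqs. 1-2 can be sketched in PyTorch as follows. This is a hedged, single-head reading of an FCA layer; the dimension names, the use of nn.MultiheadAttention in place of the raw $W_q, W_k, W_v$ formulas, and the module layout are our assumptions, not the authors' released code:

```python
import torch
import torch.nn as nn

class FCALayer(nn.Module):
    """Sketch of a fused cross-attention layer (single head, Eqs. 1-2)."""
    def __init__(self, dim, num_tokens=12):
        super().__init__()
        self.tokens = nn.Parameter(torch.randn(num_tokens, dim))       # learnable P
        self.attn_p = nn.MultiheadAttention(dim, 1, batch_first=True)  # trainable SA
        self.proj_p = nn.Linear(dim, dim)                               # W_P
        self.attn_t = nn.MultiheadAttention(dim, 1, batch_first=True)  # frozen SA
        self.proj_t = nn.Linear(dim, dim)                               # W_T (frozen)
        for p in list(self.attn_t.parameters()) + list(self.proj_t.parameters()):
            p.requires_grad = False   # the pre-trained branch stays fixed

    def forward(self, t):                      # t: (B, n, dim) point tokens
        b = t.size(0)
        p = self.tokens.unsqueeze(0).expand(b, -1, -1)
        p, _ = self.attn_p(p, p, p)            # Eq. 1: P-hat via the trainable SA
        p = self.proj_p(p)                     # Eq. 1: P' = P-hat W_P
        tp = torch.cat([t, p], dim=1)          # [T; P']
        out, _ = self.attn_t(tp, tp, tp)       # Eq. 2: frozen SA over [T; P']
        out = self.proj_t(out)
        return out[:, : t.size(1)]             # discard the learnable tokens
```

Because only the tokens and the first SA branch are trainable, removing them rolls the layer back to the original frozen encoder block, which matches the no-catastrophic-forgetting argument above.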
We first compute the similarity of the standard representation and the text embeddings: $q = \frac{E R^{T}}{\|E\|_2 \|R\|_2}$. (3) Figure 5: The difference between pseudo label and complementary label. Via pseudo labeling (a), the sparse representation is encouraged to be aligned with the text embedding of the pseudo label. On the contrary, complementary learning (b) encourages the sparse representation to be apart from the unmatched text embeddings. A direct optimization approach is to utilize the pseudo labels. For a given input $x$, its pseudo label can be formulated as follows: $(\hat{y}|x)_i = \begin{cases} 1, & i = \arg\max_j q_j \\ 0, & \text{otherwise} \end{cases}$ (4) Then we obtain the sparse representations. In this work, we down-sample the dense point cloud according to the uniform distribution. We then encode the down-sampled point cloud with the modified model (with FCA) to obtain the sparse representations. The cross-entropy loss can be formulated as in Eq. 5: $loss_{pl} = -\sum_i (\hat{y}|x)_i \log\big(\mathrm{Softmax}(\hat{q}/\tau)_i\big)$ (5) By optimizing Eq. 5, the object representation $R|x$ can be modified to match the most probable text embedding. Although utilizing pseudo labels could be a direct self-distillation approach, since the matched embedding can be seen as an approximation of the standard representation, this process might result in the model overfitting the training text embeddings and decreasing the zero-shot ability on the unseen categories. Complementary learning for dense-to-sparse self-distillation Different from learning with pseudo labels, which directly minimizes the distance between the sparse representation and the matched text embeddings, complementary learning aims to learn from the labels that an input does not belong to, namely "complementary labels". Complementary learning allows the model to pull the sparse representation apart from the unmatched text embeddings. ECL [33] proves that complementary labels are more accurate and can provide effective information in unsupervised domain adaptation, compared with pseudo labels. To enhance the zero-shot ability of the pre-trained model, instead of utilizing the pseudo labels, we adopt a complementary learning-based approach to leverage the information of labels without overfitting the observed ones. Complementary learning fetches the most "improbable" categories and fine-tunes the model to push the object representations apart from those unmatched text embeddings. In this setting, the representations are not matched to a certain embedding. We select the improbable (or negative) categories based on $q$. The $k$ categories with the smallest similarities to the standard representation are set as negative categories. Denote the inner product of the sparse representations and the text embeddings as $\hat{q}$. The loss function can be formulated as follows: $loss_{CL} = -\sum_{i \in C^{-}} \log\big(1 - \mathrm{Softmax}(\hat{q}/\tau)_i\big)$ (6) in which $C^{-}$ represents the set of negative categories and $\tau$ represents the temperature. Total loss Apart from the complementary loss, we also introduce the infoNCE loss [34] to align the sparse representation to the standard representation, which can be formulated as Eq. 7: $loss_{sd} = \frac{\exp(R_i^{T} R_i^{s}/\tau)}{\sum_{j \neq i} \exp(R_j^{T} R_i^{s}/\tau)}$, (7) in which $R_i^{s}$ represents the sparse representation of the $i$-th object. The total loss can be formulated as follows: $loss_{total} = \lambda\, loss_{sd} + loss_{CL}$, (8) where $\lambda$ is the balance coefficient. 
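A compact sketch of the objective in Eqs. 3 and 6-8 might look like the following; the hyper-parameter names (k, tau, lam) and the exact cross-entropy form used for the infoNCE term are our assumptions for illustration:

```python
import torch
import torch.nn.functional as F

def esp_zero_losses(r_dense, r_sparse, text_emb, k=5, tau=0.07, lam=0.2):
    """r_dense, r_sparse: (B, C) batch representations; text_emb: (N, C)."""
    # Eq. 3: cosine similarity of the dense (standard) representation to text embeddings
    q = F.normalize(r_dense, dim=1) @ F.normalize(text_emb, dim=1).t()   # (B, N)
    # Complementary labels: the k least-similar categories per sample
    neg_idx = q.topk(k, dim=1, largest=False).indices                    # (B, k)
    # Eq. 6: push the sparse representation away from those categories
    q_hat = F.softmax((r_sparse @ text_emb.t()) / tau, dim=1)
    loss_cl = -torch.log(1.0 - q_hat.gather(1, neg_idx) + 1e-8).sum(1).mean()
    # Eq. 7: infoNCE-style alignment of sparse and dense representations
    sim = (r_sparse @ r_dense.t()) / tau                                 # (B, B)
    targets = torch.arange(sim.size(0), device=sim.device)
    loss_sd = F.cross_entropy(sim, targets)
    # Eq. 8: total loss
    return lam * loss_sd + loss_cl
```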
4 Experiments 4.1 Implementation details In this work, we validate the proposed approach on two benchmark datasets: ModelNet40 [35] and PartNet [36]. The ModelNet40 dataset contains 12,311 objects (9,843 for training) from 40 categories. The PartNet dataset contains 26,671 objects from 24 categories. They share nine categories, and the others are unique to each other. All models are trained on an Nvidia 4090 GPU within 16 epochs. We adopt a cross-validation schema that trains the model on one dataset and validates it on the other. We show the result for the "Unseen" categories (the unique categories of each dataset). We set the number of learnable tokens to 12, and the coefficient $\lambda$ to 0.2. 4.2 Comparison with SOTA approaches We compare the proposed approach with other zero-shot learners and unsupervised model adaptation approaches. Note that for PL, Tent [37], and USKD-PL [38], we utilize the prediction of 2048 points to obtain the pseudo label or label distribution, for a fair comparison with the proposed approach. However, due to the nature of RPL [39], we could not involve the standard representation during adaptation, so only the sparse representation is utilized. In Tab. 1, we demonstrate the model performance on the unseen classes that are unique to each dataset. The result shows that our approach dramatically increases the classification accuracy on the unseen categories, by an average of 8.1% on the PartNet dataset and 8.7% on the ModelNet40 dataset, which demonstrates the effectiveness of our method. Table 1: Zero-shot classification accuracy of UnSeen categories on the ModelNet40 and PartNet datasets. Dataset M40→PartNet PartNet→M40 Num Points 128 64 16 Mean 128 64 16 Mean PointBERT [1] 45.2 17.0 0.2 20.8 23.1 12.1 5.2 13.5 PointMLP [40] 40.3 10.8 0.0 17.0 20.1 6.5 3.0 9.9 ULIP-2 [29] 33.3 17.6 7.8 19.6 31.7 15.4 4.4 17.2 Pseudo Label 38.9 29.5 13.8 27.4 25.8 15.7 3.6 15.0 TENT [37] 40.1 22.4 9.8 24.1 30.6 16.9 3.4 17.0 RPL [39] 30.6 16.8 11.1 19.5 31.0 15.6 3.1 16.6 USKD [38] 27.2 26.1 19.7 24.3 25.5 17.5 6.4 16.5 USKD-PL [38] 27.2 16.4 3.3 15.6 31.8 19.2 5.7 18.9 Ours 45.8 39.9 20.8 35.5 40.4 29.9 12.4 27.6 4.3 Point cloud completion may not be sufficient for extremely sparse point cloud zero-shot classification Point cloud completion is a direct approach to improving the classification performance. In this work, we validate two popular point cloud completion methods: PCN [30] and SeedFormer [6]. For PCN, due to the flexibility of the PCN architecture, we modify the PCN network by changing the coarse output to 256 points and the fine-grained output to 2048 points, and train the modified PCN in both end-to-end and independent manners. The end-to-end manner means that the modified PCN is trained along with the pre-trained ULIP model. For the independent manner, we first train point completion independently, then merge the modified PCN and the ULIP model. SeedFormer leverages grouping layers which are introduced in PointNet++. As there are many fixed parameters that could influence the model's performance, we maintain its initial architecture and utilize its pre-trained model. Since SeedFormer requires 256 input points, we simply repeat the point cloud to meet this requirement. The results are shown in Tab. 2. SeedFormer pre-trained on the PCN [30] dataset lacks generality on the extremely sparse point clouds of the ModelNet40 dataset, thus resulting in a performance decrease. Even if the model is fine-tuned for ModelNet-40, a performance decrease is still observed. Only when the PCN is trained in an end-to-end manner can the performance be increased, which demonstrates that simply applying point cloud completion is not sufficient for extremely sparse point cloud zero-shot classification. 4.4 Ablation study Validation of the proposed modules We first validate the effectiveness of the proposed modules on both ModelNet40 and PartNet. The result is shown in Tab. 3. "CL" denotes the complementary learning, and "CA" denotes the cross-attention. Note that the baseline represents the pre-trained ULIP-2 backbone, and the model trained only by InfoNCE needs an additional MLP head following the model. Table 2: Validation of point cloud completion for extremely sparse point clouds (128 points) on the FULL ModelNet40 test set. Model Training strategy Acc(%) SeedFormer [6] pre-trained on PCN 5.0 PCN-modified [30] two-stage 20.9 PCN-modified [30] end-to-end 37.2 No Completion / 35.4 The result demonstrates that by introducing the learnable tokens, the model performance is significantly increased. By adopting the cross-attention in FCA, the model is further improved, with an average performance gain of 1.3%. The complementary loss dramatically increases the model's performance, by an average of 5.6%, which demonstrates the effectiveness of the proposed modules. Table 3: The ablation study of the proposed modules, validated on the PartNet dataset. Note that the baseline (no modules are involved) represents ULIP-2. When only InfoNCE is adopted, a learnable MLP is appended to the pre-trained model. Self-distillation FCA Accuracy InfoNCE CL tokens CA N=128 N=64 N=16 × × × × 40.6% 24.8% 9.0% ✓ × × × 31.4% 22.2% 6.7% ✓ × ✓ × 44.2% 39.6% 25.9% ✓ × ✓ ✓ 45.4% 40.5% 27.5% ✓ ✓ ✓ ✓ 53.2% 47.9% 29.2% Further validation of point cloud completion To further validate whether point completion is beneficial for super sparse point clouds, we also validate the combination of PCN + learnable tokens + InfoNCE and show the result in Tab. 4. PCN results in a substantial performance decrease, especially with a small number of points. This result demonstrates that PCN is not able to perform robust point completion and might overfit the training samples, which leads to a loss of generality in zero-shot classification. Table 4: Further validation of point cloud completion for zero-shot super sparse point cloud classification on the PartNet dataset. N=128 N=64 N=16 w PCN 49.7% 37.0% 15.1% w/o PCN 53.2% 47.9% 30.2% Number of learnable tokens We also validate the influence of the number of learnable tokens, without the cross-attention, on the UnSeen categories of PartNet. As shown in Tab. 5, increasing the number of learnable tokens benefits the performance, but the improvement becomes marginal as the number gets larger. Table 5: The influence of the number of learnable tokens. Increasing the number of learnable tokens benefits the model performance, but the improvement becomes marginal as N gets large. Num of Tokens Num of Points Mean 128 64 16 4 32.0 21.9 7.3 20.4 8 36.6 31.3 18.6 28.8 12 43.0 35.1 20.8 33.0 24 41.2 36.6 25.6 34.5 Down-sampling strategies In addition to random down-sampling, we also validate the model performance with a KNN down-sampling strategy. 
This strategy selects a random center point and then samples the K nearest points to the center point. The result is shown in Tab. 6, which demonstrates that the proposed approach generalizes across different down-sampling strategies. Table 6: The influence of different down-sampling strategies during the test phase on the ModelNet40 dataset. Compared with ULIP-2 and its enhancement via pseudo labels, the proposed approach achieves a significant improvement. Model N=128 N=64 N=16 Mean ULIP-2 27.7 13.2 3.3 14.7 PL 29.9 17.7 4.0 17.2 Ours 38.0 26.5 6.6 23.7 5 Conclusion In this work, we raise the issue of zero-shot super sparse point cloud classification for the first time and propose a simple yet effective unsupervised training schema that effectively enhances the zero-shot classification ability on super sparse point clouds. We propose a learnable FCA to modify the latent space of the point cloud encoder while maintaining its alignment with text embeddings. We also propose a complementary learning-based self-distillation approach that leverages the training labels without overfitting the training text embeddings. Extensive experiments demonstrate the effectiveness of the proposed approach, which dramatically increases the zero-shot classification performance on super sparse point clouds.",
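The KNN down-sampling used in this test is straightforward to reproduce; a minimal sketch (function and variable names are ours):

```python
import torch

def knn_downsample(points, k):
    """Pick a random center point and keep its k nearest neighbors.
    points: (N, 3) tensor; returns a (k, 3) sparse point cloud."""
    center = points[torch.randint(points.size(0), (1,))]   # (1, 3)
    dists = torch.cdist(center, points).squeeze(0)         # (N,)
    idx = dists.topk(k, largest=False).indices             # k smallest distances
    return points[idx]
```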
"additional_graph_info": {
"graph": [],
"node_feat": {
"Jiayi Han": [
{
"url": "http://arxiv.org/abs/2404.19639v1",
"title": "ESP-Zero: Unsupervised enhancement of zero-shot classification for Extremely Sparse Point cloud",
"abstract": "In recent years, zero-shot learning has attracted the focus of many\nresearchers, due to its flexibility and generality. Many approaches have been\nproposed to achieve the zero-shot classification of the point clouds for 3D\nobject understanding, following the schema of CLIP. However, in the real world,\nthe point clouds could be extremely sparse, dramatically limiting the\neffectiveness of the 3D point cloud encoders, and resulting in the misalignment\nof point cloud features and text embeddings. To the point cloud encoders to fit\nthe extremely sparse point clouds without re-running the pre-training procedure\nwhich could be time-consuming and expensive, in this work, we propose an\nunsupervised model adaptation approach to enhance the point cloud encoder for\nthe extremely sparse point clouds. We propose a novel fused-cross attention\nlayer that expands the pre-trained self-attention layer with additional\nlearnable tokens and attention blocks, which effectively modifies the point\ncloud features while maintaining the alignment between point cloud features and\ntext embeddings. We also propose a complementary learning-based\nself-distillation schema that encourages the modified features to be pulled\napart from the irrelevant text embeddings without overfitting the feature space\nto the observed text embeddings. Extensive experiments demonstrate that the\nproposed approach effectively increases the zero-shot capability on extremely\nsparse point clouds, and overwhelms other state-of-the-art model adaptation\napproaches.",
"authors": "Jiayi Han, Zidi Cao, Weibo Zheng, Xiangguo Zhou, Xiangjian He, Yuanfang Zhang, Daisen Wei",
"published": "2024-04-30",
"updated": "2024-04-30",
"primary_cat": "cs.CV",
"cats": [
"cs.CV"
],
"main_content": "Introduction 3D point cloud processing with deep-learning approaches has been explored deeply in recent years, due to its growing applications in VR, AR, autonomous driving, embodied intelligence, and so on. With the improvements in self-attention techniques, the performance of 3D point cloud processing has significantly increased. Despite the great success of 3D point cloud processing, researchers mainly focus on dense point clouds, while the processing of extremely sparse point clouds has not been fully explored, especially in zero-shot settings. As the density of points significantly influences the shape representation of the input object, the models trained with dense point clouds may lack generalization ability on those extremely sparse point clouds. Inspired by the pre-training schema in NLP tasks, some 3D object processing approaches with the pre-training step involve the training procedure of sparse point clouds. For example, PointBERT [1] groups the input point cloud into \u2217\u2020: Corresponding author arXiv:2404.19639v1 [cs.CV] 30 Apr 2024 \fRunning Title for Header Figure 1: Zero-shot classification accuracy on the ModelNet-40 dataset. Our approach dramatically increases the zero-shot capability on the extremely sparse point clouds. clusters via furthest point sampling (FPS), encodes each cluster into an embedding, masks 25% \u223c45% clusters out of the embeddings, and then recovers those masked embeddings. This training approach results in the training on the sparse point clouds. Similarly, I2P-MAE [2] also proposes to mask out 80% of points during the pre-training procedure. However, with an initial number of 8096 points, the down-sampled point cloud still has more than 1k points which is still very dense and may not be applicable in real usages. A potential solution for enhancing sparse point cloud classification is to complete the sparse point clouds, named point completion. However, researchers of point cloud completion mainly focus on completing the sparse point clouds with approximately 2k points to 16k points [3, 4, 5, 6]. Some of the point-completion approaches validated their performance on datasets with more sparse samples, for example, the KITTI dataset [7]. However, these methods are only validated on a limited number of categories (usually only \u201dvehicle\u201d is validated). Figure 2: The proposed approach enhances the zero-shot ability on the extremely sparse point clouds. To enhance the zero-shot classification capability on the extremely sparse point clouds, in this work, we propose a novel self-distillation approach that leverages the information of the dense point cloud and the text embedding of the observed categories. The proposed approach is based on a well-trained point cloud transformer, whose latent space is aligned with the latent space of text embeddings. For the alignment of point cloud feature space and text embedding space could be reserved, we introduce an additive tuning strategy. We freeze the weights of the pre-trained point cloud transformer and attach fused cross-attention (FCA) layers to it for model optimization. Each FCA layer consists of a batch of learnable tokens, a learnable self-attention (SA) block, and a frozen self-attention (SA) block that belongs to the pre-trained network. Following the cross-attention mechanism, the learnable tokens are first fed into the learnable SA block. Inspired by VPT [8], the modified tokens are concatenated to the encoded point tokens. 
The frozen SA block further encodes the merged tokens, and the learnable tokens are discarded in the output. Despite the small number of learnable tokens, FCA could effectively enhance the pre-trained model. 2 \fRunning Title for Header We also propose a novel complementary learning-based self-distillation approach to optimize the modified model for both visible and invisible categories in the training set. Different from normal distillation approaches that utilize the pseudo labels or modified output distributions of the input, we adopt complementary labels to specify the categories to which the input does not belong, and suppress the similarity of the 3D object representations and the text embeddings of the complementary labels. This approach allows the encoded sparse representation to be pulled apart from the unmatched text embeddings rather than fitting the most similar text embedding, which reduces the risk of overfitting the observed text embeddings during training. Our main contributions could be listed as follows. 1. To the best of our knowledge, this work proposes to enhance the zero-shot classification of extremely sparse point clouds for the first time. 2. We propose a fused cross-attention layer that introduces the refinement of frozen self-attention blocks, which effectively modifies the encoded representation space of the pre-trained model while maintaining its zero-shot capability. 3. We propose a complimentary learning-based self-distillation approach that pulls the sparse point cloud representation away from the unmatched label text embeddings, which decreases the potential overfitting. 2 Related works 2.1 Point cloud processing Since the success of PointNet [9], processing 3D objects in the form of point clouds has become a natural solution. PointNet++ [10] introduces the grouping layers in point cloud processing, which allows the deep models to leverage the neighboring information like convolution networks. Afterward, many researchers propose to utilize well-designed kernels to improve their performances. For example, KP-conv [11] utilizes a spherical neighborhood and a kernel function to determine the weight of each neighboring point during convolution. Edge-conv [12] utilizes the relative position to fetch the integrating weight of neighboring points. [13, 14] leverage local geometry for point cloud classification. [15] proposes multi-scale FPS to fuse point cloud features. [16] transforms points into a Hough space, and utilizes CNN-based networks to encode the points. Augmentations are also beneficial for point cloud processing [17]. In recent years, many approaches have introduced self-attention blocks to point cloud processing. Point Transformer [18] and Point Cloud Transformer [19] introduce the self-attention mechanism to point cloud processing for the first time. They utilize similar structures like PointNet++ and modify the feature aggregation with self-attention layers. Meanwhile, YOGO [20] proposes to group and embed the points once that first group and encode multiple sub-structures of the point cloud into embeddings, then calculate the cross-attention of grouped embeddings and all the points. This strategy effectively reduces the cost of SA-based point cloud processors but sacrifices some precision. To decrease the cost of self-attention in point cloud processing, PointFormer [21] proposes to utilize Linformer to replace the standard self-attention mechanism. 
SD-SA [22] validates the efficient self-attention mechanisms for the point cloud transformer and proposes to modify self-attention with skeleton decomposition to reduce the computational cost of the point cloud transformer. Inspired by NLP tasks, PointBERT [1] and I2P-MAE [2] introduce BERT-styled pre-training in 3D processing and achieve great success. They mask some tokens of the point cloud and train to recover those masked tokens. The fine-tuning procedure is followed to fetch the downstream capabilities. [23] utilizes a novel pipeline that leverages neural rendering and 2D images to align features of point clouds and images for effective model pre-training. 2.2 Zero-shot classification of point clouds Inspired by the success of CLIP [24], many approaches are proposed to classify point clouds in a zero-shot manner. PointCLIP [25] directly renders the point clouds to depth images and adopts a pre-trained CLIP model to classify the rendered images with prompts like \u201cthis is the depth map of \u201c{category}\u201d. CLIP2Point [26] proposes to render the initial 3D mesh and point cloud into images and depth maps, encode them by pre-trained CLIP models, and fine-tune the model to align their features. [27] aligns the seen semantics and point cloud features, and leverages the unlabeled object to address the downstream issues like domain adaption. ULIP and ULIP-2 [28, 29] further introduce the text embeddings in the training phase. The point cloud features are aligned to both CLIP features and corresponding text embeddings of the ground truth labels. 3 \fRunning Title for Header Figure 3: The overall framework of the proposed approach. The Dense point cloud is down-sampled to a sparse point cloud, grouped by KNN, and encoded to point cloud tokens. The initial model is modified with a trainable FCA block when processing the tokens of the sparse point cloud. We then distill the model with infoNCE loss and CL loss with the assistance of text embeddings. 2.3 Point cloud completion Point completion network (PCN) [30] is one of the most classical point cloud completion approaches, which introduces both coarse and fine-grained supervision during training. Many approaches pay attention to refining the details of the completed point clouds. PMP-Net [4] proposed to iteratively refine the sparse point cloud. After each refinement step, the modified point cloud is further refined, until it reaches the maximum refinement steps. PMP-Net++ [5] further enhances the PMP-Net with the self-attention mechanism. SeedFormer [6] introduces patch seeds as shape representation which leverages both global information (of the partial point cloud) and local geometry. Similarly, [31] also leverages the geometry transformation to recover the missing point. USSPA [32] introduces a shape-preserving autoencoder for point cloud completion without supervision. Adapted loss functions are also introduced to enhance the point cloud completion [3]. 3 Method 3.1 Overall Architecture As depicted in Fig. 3, the overall framework consists of a pre-trained point cloud transformer with grouping and encoding layers, encoding layers of the point embeddings, and a final projecting layer that projects the point cloud representation to the latent space shared with text embeddings. The pre-trained network could be seen as a teacher model. To modify the pre-trained model, we expand the pre-trained encoding layers to an FCA block. 
A trainable FCA block consists of additional learnable tokens, an SA block, and the corresponding frozen encoding layer. By modifying the trainable parts in the FCA block, the model could be enhanced for extremely sparse point clouds. During training, the dense point cloud is down-sampled to a sparse point cloud. The dense point cloud is directly encoded with the pre-trained model to obtain a standard representation. The model modified by the FCA block encodes the sparse point cloud. The inner product of text embeddings and the dense representations is utilized to fetch the pseudo supervision for the sparse representation to learn from. During testing, the text embedding with the largest similarity to the sparse representation indicates the final prediction. Note that since the pre-trained weights are fixed, and the FCA block could be directly rolled back to the initial encoding layer, therefore we could avoid catastrophic forgetting. 3.2 Fused Cross-attention (FCA) The pre-trained point-cloud transformer is well-aligned with text embedding, which is crucial to zero-shot classification. To fine-tune the pre-trained model without eliminating its zero-shot capability, we introduce fused cross-attention to each transformer block of the point-cloud transformer. 4 \fRunning Title for Header Figure 4: The structure of FCA. The learnable tokens are firstly processed by an SA block, then combined with the encoded point cloud tokens, pass through an encoder block, and only output the point cloud tokens. For each transformer block, we add m learnable tokens which are randomly initialized. We first forward the learnable tokens with a self-attention block, then fuse them to the encoded point cloud tokens. Specifically, denote the encoded point cloud tokens as T = {ti}n i=1 and the learnable tokens as P = {pj}m j=1. The learnable tokens are first processed as in Eq. 1: ( \u02c6 P = P Wq(P Wk)T \u03bb PWv P \u2032 = \u02c6 PWP , (1) in which SA and FFN represent the self-attention and the feed-forward network, respectively. P \u2032 is then fused with T via self-attention as follows: ( \u02c6 T = [T ;P \u2032]Wq([W ;P \u2032]Wk)T \u03bb [T; P \u2032]Wv T \u2032 = \u02c6 TWT , (2) After the fusion procedure, the modified point tokens are fed forward, while the learnable tokens are discarded so that the total number of tokens is consistent. The final output of each FCA layer is as T \u2032 0:|T |. 3.3 Self-distillation with complementary learning To enhance the classification ability of the pre-trained point cloud transformer, we propose a self-distillation schema to optimize the learnable parameters (the learnable tokens, q, k, v projections, and FFN) in FCA. The pre-trained point cloud transformer without FCA is utilized to encode the dense point cloud (with 2048 points) to form its standard representation which is well-aligned to the text embedding of the prompt template of the corresponding category. The text embedding could be obtained by the corresponding text encoder, in our case, a text transformer aligned with the pre-trained point cloud transformer. Pseudo label for Self-distillation Denote the standard representation as R \u2208R1,C, and the text embeddings of the prompt templates (as shown in Fig. 3) of involved categories as E \u2208RN,C, in which N, C represent the number of involved categories and the dimensionality of the latent space, respectively. The complementary labels of the input objects could be obtained by the similarity of the standard representation and text embeddings. 
We first compute the similarity of the standard representation and the text embeddings: q = ERT ||E||2||R||2 . (3) 5 \fRunning Title for Header Figure 5: The difference between pseudo label and complementary label. Via pseudo labeling (a), the sparse representation is encouraged to be aligned with the text embedding of the pseudo label. On the contrary, complementary learning (b) encourages the sparse representation to be apart from the unmatched text embeddings. A direct optimization approach could be utilizing the pseudo labels. For a given input x, its pseudo label could be formulated as follows: (\u02c6 y|x)i = ( 1, i = argmax i qi 0, otherwise (4) Then we obtain the sparse representations. In this work, we down-sample the dense point cloud according to the uniform distribution. We then encode the down-sampled point cloud with the modified model (with FCA) to obtain the sparse representations. The cross-entropy loss could be formulated as in Eq. 5: losspl = \u2212P i (\u02c6 y|x)ilog \u0000Softmax(\u02c6 q/\u03c4)i \u0001 (5) By optimizing Eq. 5, the object representation R|x could be modified to match the most probable text embedding. Although utilizing pseudo labels could be a direct self-distillation approach since the matched embedding could be seen as an approximation of the standard representation, this process might result in the model overfitting on the training text embeddings, and decrease the zero-shot ability on the unseen categories. Complementary learning for dense-to-sparse self-distillation Different from learning with pseudo labels which directly minimize the distance between the sparse representation and the matched text embeddings, complementary learning aims to learn from the labels that an input does not belong to, namely \u201ccomplementary labels\u201d. Complementary learning allows the model to pull the sparse representation apart from the unmatched text embeddings. ECL[33] proves that the complementary labels are more accurate and could provide effective information in unsupervised domain adaptation, compared with pseudo labels. To enhance the zero-shot ability of the pre-trained model, instead of utilizing the pseudo labels, we adopt a complementary learning-based approach to leverage the information of labels without overfitting the observed ones. Complementary learning fetches the most \u201cimprobable\u201d categories and fine-tunes the model to push the object representations apart from those unmatched text embeddings. In this setting, the representations are not matched to a certain embedding. We select the improbable (or negative) categories based on q. The k categories with the smallest similarities to the standard representation are set to negative categories. Denote the inner product of the sparse representations and text embedding to \u02c6 q. The loss function could be formulated as follows: lossCL = \u2212P i\u2208C\u2212log(1 \u2212Softmax(\u02c6 q/\u03c4)i) (6) in which C\u2212represents the set of negative categories and \u03c4 represents temperature. Total loss Apart from the complementary loss, we also introduce infoNCE loss [34] to align the sparse representation to the standard representation, which could be formulated as Eq. 7: losssd = exp(RT i Rs i /\u03c4) P j\u0338=i exp(RT j Rs i /\u03c4), (7) in which Rs i represents the sparse representation of the ith object. The total loss could be formulated as follows: losstotal = \u03bblosssd + lossCL, (8) where \u03bb is the balance coefficient. 
6 \fRunning Title for Header 4 Experiments 4.1 Implementation details In this work, we validate the proposed approach on two benchmark datasets: ModelNet40[35] and PartNet[36]. ModelNet40 dataset contains 12,311 objects (9843 for training) from 40 categories. PartNet dataset contains 26,671 objects from 24 categories. They share nine categories, and the others are unique to each other. All models are trained on a Nvidia-4090 GPU within 16 epochs. We adopt a cross-validation schema that trains the model on one dataset and validates it on the other. We show the result for the \u201cUnseen\u201d categories (the unique categories of each dataset). We set the number of learnable tokens to 12, and the coefficient \u03bb to 0.2. 4.2 Comparison with SOTA approaches We compare the proposed approach with other zero-shot learners and unsupervised model adaption approaches. Note that for PL, Tent[37] and USKD-PL[38], we utilize the prediction of 2048 points to obtain the pseudo label or label distribution, for a fair comparison with the proposed approach. However, due to the nature of RPL [39], we could not involve the standard representation during adaptation, so only the sparse representation is utilized. In Tab. 1, we demonstrate the model performance on the unseen classes that are unique to each dataset. The result shows that our approach dramatically increases the classification of the unseen categories, in an average of 8.1% on the PartNet dataset, and 8.7% on the ModelNet40 dataset, which demonstrates the effectiveness of our method. Table 1: Zero-shot classification accuracy of UnSeen categories on ModelNet40 and PartNet dataset. Dataset M40\u2192Partnet PartNet\u2192M40 Num Points 128 64 16 Mean 128 64 16 Mean PointBERT[1] 45.2 17.0 0.2 20.8 23.1 12.1 5.2 13.5 PointMLP[40] 40.3 10.8 0.0 17.0 20.1 6.5 3.0 9.9 ULIP-2[29] 33.3 17.6 7.8 19.6 31.7 15.4 4.4 17.2 Pseudo Label 38.9 29.5 13.8 27.4 25.8 15.7 3.6 15.0 TENT[37] 40.1 22.4 9.8 24.1 30.6 16.9 3.4 17.0 RPL[39] 30.6 16.8 11.1 19.5 31.0 15.6 3.1 16.6 USKD[38] 27.2 26.1 19.7 24.3 25.5 17.5 6.4 16.5 USKD-PL[38] 27.2 16.4 3.3 15.6 31.8 19.2 5.7 18.9 Ours 45.8 39.9 20.8 35.5 40.4 29.9 12.4 27.6 4.3 Point cloud completion may not be sufficient for extremely sparse point cloud zero-shot classification It is a direct approach to improving the classification performance by point cloud completion. In this work, we validate two popular point cloud completion methods: PCN [30] and SeedFormer [6]. For the PCN, due to the flexibility of PCN architecture, we modify the PCN network by changing the coarse output to 256 points and the fine-grained output to 2048 points and training the modified PCN in both end-to-end and independent manners. The end-to-end manner means that the modified PCN is trained along with the pre-trained ULIP model. For the independent manner, we first train point completion independently, then merge the modified PCN and the ULIP model. SeedFormer leverages grouping layers which are introduced in PointNet++. As there are many fixed parameters that could influence the model\u2019s performance, we maintain its initial architecture and utilize its pre-trained model. Since the SeedFormer holds the limitation of 256 input points, we simply repeat the point cloud to meet its limitation. The results are shown in Tab.1. Seedformer pre-trained with PCN [30] dataset lacks generality on the extremely sparse point clouds of the ModelNet40 dataset, thus resulting in a performance decrease. 
Even if the model is fine-tuned for ModelNet-40, a performance decrease is still observed. Only when the PCN is trained in an end-to-end manner can the performance be increased, which demonstrates that simply applying point cloud completion is not sufficient for extremely sparse point cloud zero-shot classification. 4.4 Ablation study Validation of the proposed modules We first validate the effectiveness of the proposed modules on both ModelNet40 and PartNet. The result is shown in Tab. 3. \u201cCL\u201d demonstrates the complementary learning, and \u201cCA\u201d demonstrate the cross-attention. Note that the baseline represents the pre-trained ULIP-2 backbone and the model trained only by 7 \fRunning Title for Header Table 2: Validation of point cloud completion for extremely sparse point cloud (128 points) on the FULL ModelNet40 test set. Model Training strategy Acc(%) SeedFormer[6] pre-trained on PCN 5.0 PCN-modified[30] two-stage 20.9 PCN-modified[30] end-to-end 37.2 No Completion / 35.4 InfoNCE needs an additional MLP head following the model. The result demonstrates that by introducing the learnable tokens, the model performance is significantly increased. By adopting the cross attention in FCA, the model is further improved, with an average of 1.3% performance gain. The complementary loss dramatically increases the model\u2019s performance, on an average of 5.6%, which demonstrates the effectiveness of the proposed modules. Table 3: The ablation study of the proposed modules, validated on the PartNet dataset. Note that the baseline (no modules are involved) represents ULIP-2. When only InfoNCE is adopted, a learnable MLP is followed to the pre-trained model. Self-distillation FCA Accuracy InfoNCE CL tokens CA N=128 N=64 N=16 \u00d7 \u00d7 \u00d7 \u00d7 40.6% 24.8% 9.0% \u2713 \u00d7 \u00d7 \u00d7 31.4% 22.2% 6.7% \u2713 \u00d7 \u2713 \u00d7 44.2% 39.6% 25.9% \u2713 \u00d7 \u2713 \u2713 45.4% 40.5% 27.5% \u2713 \u2713 \u2713 \u2713 53.2% 47.9% 29.2% Further validation of point cloud completion To further validate whether point completion is beneficial to super sparse point clouds, we also validate the combination of PCN+learnable tokens+InfoNCE and show the result in Tab. 4. The PCN results in a crucial performance decrease, especially with a short number of points. This result demonstrates that the PCN is not able to perform robust point completion, and might overfit on the training samples, which leads to the loss of generality in zero-shot classification. Table 4: Further validation of point cloud completion for zero-shot super sparse point cloud classification on PartNet dataset. N=128 N=64 N=16 w PCN 49.7% 37.0% 15.1% w/o PCN 53.2% 47.9% 30.2% Number of learnable tokens We also validate the influence of the number of learnable tokens, without the cross attention, on the UnSeen categories of PartNet. As shown in Tab. 5, increasing the number of learnable tokens benefits the performance, but the improvement becomes marginal with the number getting larger. Table 5: The influence of the number of learnable tokens. Increasing the number of learnable tokens benefits the model performance, but the improvement is getting marginal with N getting large. Num of Tokens Num of Points Mean 128 64 16 4 32.0 21.9 7.3 20.4 8 36.6 31.3 18.6 28.8 12 43.0 35.1 20.8 33.0 24 41.2 36.6 25.6 34.5 Down-sampling strategies Except for the random down-sampling, we also validate the model performance of the KNN down-sampling strategy. 
This strategy selects a random center point and then samples the K nearest points to the center point. The results are shown in Tab. 6 and demonstrate that the proposed approach generalizes across different down-sampling strategies.
Table 6: Influence of different down-sampling strategies during the test phase on the ModelNet40 dataset. Compared with ULIP-2 and its enhancement via pseudo labels, the proposed approach achieves a significant improvement.
ULIP-2: 27.7 (N=128) / 13.2 (N=64) / 3.3 (N=16) / 14.7 (Mean)
PL: 29.9 / 17.7 / 4.0 / 17.2
Ours: 38.0 / 26.5 / 6.6 / 23.7
5 Conclusion In this work, we raise the issue of zero-shot super sparse point cloud classification for the first time and propose a simple yet effective unsupervised training scheme that enhances the zero-shot classification ability on super sparse point clouds. We propose a learnable FCA that modifies the latent space of the point cloud encoder while maintaining its alignment with text embeddings. We also propose a complementary learning-based self-distillation approach that leverages the training labels without overfitting to the training text embeddings. Extensive experiments demonstrate the effectiveness of the proposed approach, which dramatically increases zero-shot classification performance on super sparse point clouds."
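(Referring back to the KNN down-sampling just described, a minimal sketch; the function name is assumed:)

```python
import torch

def knn_downsample(points: torch.Tensor, k: int) -> torch.Tensor:
    """Keep the k nearest neighbors of a randomly chosen center point.

    points: (N, 3) input cloud; returns a (k, 3) sparse cloud.
    """
    center = points[torch.randint(points.size(0), (1,))]  # (1, 3) random center
    dists = torch.cdist(center, points).squeeze(0)        # (N,) distances to center
    idx = dists.topk(k, largest=False).indices            # indices of k nearest points
    return points[idx]
```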
},
{
"url": "http://arxiv.org/abs/2301.06013v1",
"title": "Rethinking Precision of Pseudo Label: Test-Time Adaptation via Complementary Learning",
"abstract": "In this work, we propose a novel complementary learning approach to enhance\ntest-time adaptation (TTA), which has been proven to exhibit good performance\non testing data with distribution shifts such as corruptions. In test-time\nadaptation tasks, information from the source domain is typically unavailable\nand the model has to be optimized without supervision for test-time samples.\nHence, usual methods assign labels for unannotated data with the prediction by\na well-trained source model in an unsupervised learning framework. Previous\nstudies have employed unsupervised objectives, such as the entropy of model\npredictions, as optimization targets to effectively learn features for\ntest-time samples. However, the performance of the model is easily compromised\nby the quality of pseudo-labels, since inaccuracies in pseudo-labels introduce\nnoise to the model. Therefore, we propose to leverage the \"less probable\ncategories\" to decrease the risk of incorrect pseudo-labeling. The\ncomplementary label is introduced to designate these categories. We highlight\nthat the risk function of complementary labels agrees with their Vanilla loss\nformula under the conventional true label distribution. Experiments show that\nthe proposed learning algorithm achieves state-of-the-art performance on\ndifferent datasets and experiment settings.",
"authors": "Jiayi Han, Longbin Zeng, Liang Du, Weiyang Ding, Jianfeng Feng",
"published": "2023-01-15",
"updated": "2023-01-15",
"primary_cat": "cs.CV",
"cats": [
"cs.CV"
],
"main_content": "Introduction Deep-learning techniques have demonstrated exceptional performance when trained and evaluated on data from the same distribution. Nevertheless, this performance may not generalize well to unseen data with distribution shifts, for instance, image corruption. However, generalization to diverse data shifts is restricted due to the infeasibility of incorporating a suf\ufb01cient number of augmentations during training to account for the wide range of possible data shifts [Mintun et al., 2021]. An effective technique to transfer the model to a new related data domain is required, known as domain adaptation. \u2217\u2020These authors contribute equally. Figure 1: The accuracy of the positive pseudo labels and complementary ones in testing-time adaptation. The negative ones make fewer mistakes in the prediction of correct labels. P.Label and C.Label represent the pseudo label and complementary label, respectively. In this work, we focus on the problem of fully test-time adaptation (TTA), where the source data is not available during adapting to unlabeled test data. We only access the trained model in the source domain and update the parameters via a few optimization steps on an unsupervised objective involving the test samples from the target distribution. Different works exist to improve the model\u2019s performance during the testing procedure. [Liang et al., 2020] learns the target-speci\ufb01c feature extraction by exploiting both information maximization and self-supervised pseudo-labeling to implicitly align representations from the target domains to the source. [Wang et al., 2021] reuse the trained model to represent the label distribution of testing data and propose to minimize the entropy loss to maximize the prediction con\ufb01dence during adaptation. [Mummadi et al., 2021] extend the work of Wang [Wang et al., 2021] by introducing a novel loss function and prepending a parameterized input transformation module, effectively improving the robustness. [Zhang et al., 2021] demonstrates that deep models are capable of converging to incorrect labels. Therefore, the approaches mentioned above have a common drawback of requiring a suf\ufb01cient reliable pseudo label of unseen target testing data. Given a decision function in multi-class classi\ufb01cation, identifying a class label that is incorrect for the new coming instance is often much convinced than identifying the actual label. As shown in Fig. 1, the predictions of complementary labels are much more accurate than the naive (positive) pseudo labels. The label that \ufb01gures out the categories that the sample does not belong to is called a complementary label. In Fig. 2, we give an example of a Complementary label. Complementary labels carry useful information and are arXiv:2301.06013v1 [cs.CV] 15 Jan 2023 \fFigure 2: An illustration of different types of labels. The right picture is the input sample. \u201cPrediction\u201d represents the predicted probability distribution. Pseudo label, soft label, complementary label, and soft complementary label are generated accordingly. validated in comprehensive experiments on several benchmark datasets [Yu et al., 2018]. We could mitigate the abovementioned error caused by incorrect annotating by introducing a novel learning strategy with complementary labels instead of directly assigning a pseudo label to take the place of the inaccessible label. 
Complementary labels are easily obtainable, especially in the TTA we can reuse the source model to specify the least probable categories. Optimizing the model by tuning the decision function to suppress the less possible complementary labels makes sense. In the context of the forward decision problem, complementary labels provide limited information, however, they signi\ufb01cantly mitigate the negative effects of high-con\ufb01dence false tags on the model. Motivated by the pseudo-labeling method in Tent [Wang et al., 2021], we here model the learning procedure of annotating complementary labels via probabilities. In this work, we propose a complementary learning (CL) framework in TTA without any knowledge of source data. Speci\ufb01cally, we normalize the predicted distribution of the trained source model and then assign the complementary distribution. Then, we modify standard loss functions proposed for learning with actual labels so that the modi\ufb01cations can be employed to learn with complementary labels ef\ufb01ciently. We highlight that this proposed risk function agrees with vanilla learning with ordinary labels and rapidly converges to an optimal one. Moreover, we also empirically demonstrate its effectiveness by exploiting examples in fully test-time adaptation problems. The main contributions of this paper are listed as follows: 1. To the best of our knowledge, this is the \ufb01rst work that proposes to utilize the complementary labels on the TTA task. 2. The proposed risk function theoretically ensures that the classi\ufb01er learned with complementary labels converges to an optimal hypothesis. 3. Our proposed algorithm reaches state-of-the-art performance on different datasets and experiment settings. Extensive evaluations demonstrate the effectiveness of the proposed method. 2 Related Work 2.1 Model Training with Noisy Labels Training model with noisy labels is an important issue in deep-learning tasks, as incorrect labels are inevitable, and models have the ability to over\ufb01t on noisy data [Zhang et al., 2021]. A basic approach is to purify the labels. [Wu et al., 2020] proposes to \ufb01lter the clean samples via their topological information in the latent space. Similarly, [Kim et al., 2021] proposes to \ufb01lter the noisy samples according to the eigenvectors. [Xia et al., 2021] assumes that the volume of loss could be a clue of clean data and utilize interval estimation to \ufb01nd the most probable noisy labels. However, directly eliminating the samples with noisy labels reduces the size of the available data. To explicitly utilize the dataset, some approaches propose refurbishing the noisy labels [Zheng et al., 2020; Wang et al., 2020; Chen et al., 2021]. These approaches managed to detect the samples with incorrect labels and replace the annotation with the model\u2019s prediction. 2.2 Unsupervised Domain Adaption (UDA) and TTA UDA is a similar task to TTA. UDA aims to improve a model trained on the source domain and validate on the target domain. There are two main differences between UDA and TTA: First, UDA is generally an of\ufb02ine training strategy that allows collecting the whole target dataset for of\ufb02ine \ufb01netuning. Opposite to UDA, TTA can only see the current and the past mini-batches. Second, knowledge of the source domain is allowed to be accessible to UDA models, which is not supposed to be involved in TTA [Wang et al., 2021]. 
For UDA, since the training data in the source domain are supposed to be available, many works optimize the model via two tasks: the main classi\ufb01cation task supervised by data from the source domain, and the feature alignment task that minimizes the gap of feature distribution of data from both domains [Long et al., 2013; Chen et al., 2019]. [Liu et al., 2018] disentangles the feature representations from the source and target domains with source-domain supervision. For TTA, a basic approach is to introduce pseudolabeling. To further involve the con\ufb01dence, [Wang et al., 2021] suggests using simple Shannon entropy H = \u2212P c p(yc)log(p(yc)) as the target of optimization other than the standard pseudo label. Meanwhile, they should solely optimize the batch normalization layer to avoid the model collapsing. [Wang et al., 2022] proposes to utilize test-time augmentation to generate more reliable soft pseudo labels and \ufb01t the model to the labels by cross-entropy loss. Different from [Wang et al., 2021], they propose that the environment is constantly evolving and the model must adapt to these changes over time. In this work, we conduct experiments on both settings to demonstrate the effectiveness of the proposed complementary learning. 3 Motivation Analysis To illustrate our motivation, we start by calculating the accuracy of the pseudo label and complimentary label. Consider a well-trained classi\ufb01er f. Assume f(x) is an ideal estimation of the probability that x belongs to each category. If f(x)c < \u03b8c, we assume that x may not belongs to the cth category, so the cth category could be viewed as a negative category. We call the set of negative categories \u201ccomplementary label\u201d. Pseudo-labeling assumes that the most probable category is the true category so that when f(x) is an ideal estimation, the probability of the pseudo label being the ground truth is P = f(x)max. Then we de\ufb01ne the accuracy of com\fplementary labels. Since there could be more than one negative category for each sample, if the ground truth category is not involved in the complementary label of x, we name it a \u201ccorrect complementary label\u201d. The probability Pcl of the complementary label being correct could be calculated as Pcl = Y c\u2208\u00af yx (1 \u2212f(x)c) \u2265 Y c\u2208\u00af yx (1 \u2212\u03b8c) \u2265(1 \u2212\u0398)|\u00af yx| \u2265(1 \u2212\u0398)C\u22121. (1) in which C is the number of categories, \u00af yx represents the set of the negative categories of sample x \ufb01ltered by the thresholds, and \u0398 represents the largest threshold of all categories. According to Equ. 1, when \u0398 < 1 \u2212f 1 C\u22121 max , Pcl > P. That is to say, to a given sample, if the threshold is small enough, the accuracy of the complementary label is higher than it of the pseudo label. 4 Methods 4.1 A Basic Approach A direct approach to utilizing the complementary label is \ufb01nding the negative categories and minimizing the predicted probability of these selected negative categories. We only minimize the predicted probabilities on the negative categories and ignore the non-negative ones. 
Specifically, given a model f well-trained on the source domain and a batch of data D = {x_i}_{i=1}^N, we optimize the model according to the following objective function:
L = -(1 / (N × C)) Σ_{x∈D} Σ_{c=1}^{C} δ(θ_c > f(x)_c) · p log(p), where p = 1 − f(x)_c, (2)
in which C is the number of categories and θ_c is a chosen threshold for filtering the complementary label c. The indicator function δ(·) returns 1 if the enclosed assertion is true and 0 otherwise. In the rest of the paper, we call this basic approach BCL (basic complementary learning). 4.2 Learning with Confidence If the probability that a sample belongs to a certain category falls below a threshold, that category is considered a negative category for the sample. However, if the probability is significantly lower than the threshold, the category is a negative category with high confidence, which the basic method above may not model adequately. To address this issue, we enhance BCL by incorporating confidence; we call the enhanced version ECL (enhanced complementary learning). In the framework of learning an input-output relationship, training samples (x_i, y_i) (input-output pairs) are drawn from an underlying distribution p*(x, y). A well-trained model specifies a conditional distribution p_θ(y|x) for a given input x with parameters θ, yielding the joint distribution of input-output pairs p_θ(x, y) = p_θ(y|x) p*(x), where p*(x) = ∫ p*(x, y) dy is the marginal distribution of the input. The goal is to minimize the KL divergence between the true distribution p*(x, y) and the model prediction p_θ(x, y), which is equivalent to maximum likelihood, i.e., to minimizing the risk function
R(θ) = ⟨−log p_θ(x, y)⟩_{p*} = ⟨−log p_θ(y|x)⟩_{p*} + const, (3)
where ⟨·⟩_{p*} denotes the expectation over the true data distribution. In the usual classification case with ordinarily labeled data at hand, approximating the risk empirically is straightforward:
R(θ) = (1/n) Σ_{i=1}^{n} −log p_θ(y_i | x_i). (4)
Here, n is the number of instances in the available dataset, and y_i is the given class label for the i-th instance. The equality holds due to two facts: 1) the conditional distribution of the class label q*(y|x) for every training instance is a normalized delta function; and 2) the number of data instances in the real world is generally finite. In the special case of the closed-set test-time adaptation problem considered here, we have already obtained a model trained on the source domain and must adapt it to new incoming data without access to ground-truth annotations. Herein, we propose a framework of complementary label learning with a consistent risk, which makes the model's learning objective agree with minimizing the real classification risk on the unseen dataset. Assigning the pseudo labels: The most challenging problem in TTA is how to represent the labels of instances in the target domain. The most direct way to produce pseudo labels is to assign the most probable class by reusing the knowledge f^src(·) learned from the source domain. The decision function f^src(x_i) : X → R^K is parameterized by the output layer of a deep network, where K is the number of classes.
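(Before turning to the pseudo-label distribution, here is a minimal PyTorch sketch of the BCL objective in Eq. (2); the tensor shapes and the numerical clamp are my additions, not the authors' code:)

```python
import torch

def bcl_loss(probs: torch.Tensor, thresholds: torch.Tensor) -> torch.Tensor:
    """Basic complementary learning loss, following Eq. (2).

    probs:      (N, C) predicted class probabilities f(x)
    thresholds: (C,) per-class thresholds theta_c for negative categories
    """
    p = 1.0 - probs                               # prob. of NOT belonging to class c
    mask = (probs < thresholds).float()           # delta(theta_c > f(x)_c)
    loss = -(mask * p * torch.log(p.clamp_min(1e-8))).sum()
    return loss / probs.numel()                   # the 1 / (N * C) normalization
```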
The decision function suggests a temporary appropriate data distribution \u02c6 p\u2217(y|x = xi) = f src(xi). For the ordinary one-versus-zero label distribution, the general pseudo label of instance xi is ( 1 if y = arg max k ([f src(xi)k]), 0 else, (5) where []k denotes the k-th element. However, this implementation of pseudo labels requires enough con\ufb01dence in decision function f src(\u00b7). If fed with too much incorrect information, the model\u2019s performance may be severely damaged. The one-versus-zero label distribution seems to be greatly demanding, resulting in the model easily suffering extremely sensitive errors. Herein, we propose a complementary label density to formulate the soft complementary labels (CLs). Similarly, we reuse the decision classi\ufb01er f src as a prior label distribution \u02c6 p\u2217and then \ufb01lter it with a prede\ufb01ned threshold to obtain the distribution of complementary labels. \u00af p\u2217(\u00af y|x = xi) = [\u03b8 \u2212f src(xi)]T + \u03b8T e\u00af y \u00b7 e\u00af y, (6) where [\u00b7]+ represents the operator that remains the positive part else zero; \u03b8 denotes the maximum probability to \ufb01lter the \fcomplementary labels and ei the i-th unit vector. Note that we have ignored the negative probability of complementary labels for avoiding polluting the model and scale it with its corresponding threshold of each class. Assumption 1. We assume the conditional distribution \u02c6 p\u2217(y|x) is suf\ufb01ciently close to the true distribution p\u2217(y|x)in their functional space. It equally says that, learning with the distribution \u02c6 p\u2217in the target domain makes the corresponding classi\ufb01er f target return a similar performance on the data with the true distribution. Assumption 1 simply offers us a view that starting with prior estimation \u02c6 p\u2217(y|x) = f src(x) is feasible as Eq. 6. The conditional complementary label distribution is exactly re\ufb02ecting the probability that this instance xi does not belong to the class yi. Designing the complementary risk formula In the following, we will use the shorthand notation \u02c6 p\u2217and \u00af p\u2217to represent the data distribution \u02c6 p\u2217(y|x) and \u00af p\u2217(\u00af y|x). In the target domain, the marginal distribution p\u2217(x) is the same for the ordinary dataset and complementary dataset. So, we can rewrite Eq. 6 as: \u00af p\u2217= 1 \u03b8 \u25e6 \u0000\u03b8eT \u2212I \u0001 \u02c6 p\u2217, (7) where I is the unit matrix. Note that we have dropped the operator [\u00b7]+ in the derivation of Equ. 7 for a compact formula. Consequently, \u02c6 p\u2217= \u2212 \u0000I \u2212\u03b8eT \u0001\u22121 \u0398\u00af p\u2217 = \u2212 \u0012 I + 1 1 \u2212eT \u03b8\u03b8eT \u0013 \u0398\u00af p\u2217, (8) where \u0398 is diagonal matrix of vector \u03b8 and the second equality holds since the Sherman-Morrison formula. This conservation gives us an explicit relationship between estimated distribution \u02c6 p\u2217and our de\ufb01ned complementary distribution \u00af p\u2217. Finally, we derive the risk function in complementary data distribution \u00af p\u2217to agree with the data de\ufb01ned in ordinary data distribution \u02c6 p\u2217, \u02c6 R(\u03b8) = \u27e8\u2212log p\u03b8(y|x)\u27e9\u02c6 p\u2217 = eT [\u2212log p\u03b8(y|x)\u02c6 p\u2217(y|x)] e = eT \u0014 \u2212log p\u03b8(y|x) \u0012 \u2212I \u2212 1 1 \u2212eT \u03b8\u03b8eT \u0013 \u0398\u00af p\u2217 \u0015 e. 
(9) where the conditional distribution p\u03b8(y|x) is the optimization objective and it can be simply represented as decision function f test. It is easy to implement by incorporating the de\ufb01nition of the complementary label in Equ. 6 as \u00af R(\u03b8) = 1 N X xi X \u00af y|\u00af p\u2217>0 \u2212[\u03b8\u00af y\u2217\u2212f src(xi)\u00af y]+ \u03b8\u00af y \uf8ee \uf8f0\u03b8\u00af y[logf test(xi)]\u00af y + 1 1 \u2212eT \u03b8\u03b8\u00af y X j \u03b8i[logf test(xi)]j \uf8f9 \uf8fb. (10) We have recovered the original de\ufb01nition of Equ. 6 in the second summation of Equ. 10. This algorithm only computes the loss of valid negative categories, avoiding the in\ufb02uence of ambiguous and uncertain instances. Thresholding strategy. To \ufb01nd the proper threshold \u03b8 introduced in the past sections, we employ two thresholding techniques: a \ufb01xed thresholding strategy and a dynamic thresholding strategy. The \ufb01xed strategy involves treating the threshold as a hyperparameter and maintaining it constant across all categories during the TTA procedure. The dynamic strategy, on the other hand, adapts the threshold during the procedure in response to the probability distribution of past samples. To adapt the threshold during testing, we propose to store the output distributions in a memory bank, which is denoted as Q. We utilize the probability distribution of Q as the prior distribution of p(y). The threshold of the ith category \u03b8i is calculated as follows: \u03b8i = Percentile(Qi,:, t), (11) in which t is a hyperparameter. To a new batch of data, denote the output matrix as D \u2208[0, 1]N\u00d7C in which N represents the number of samples, and C represents the number of categories. We calculate the complementary label matrix \u02dc D as in Equ. 12: \u02dc Di,j = [Percentile(Qj,:,t)\u2212Di,j]+ Percentile(Qj,:,t) . (12) After each batch, we refresh Q via the current batch of data. First, we merge Q and D together. If |Q| > L in which L is the largest number of samples to be stored in Q, we delete the \ufb01rst |Q| \u2212L columns to meet |Q| \u2264L. |Q| represents the number of samples stored in Q currently. A brief illustration of this procedure is shown in Fig. 3. 5 Experiments 5.1 BCL with Known Complementary Label. We conduct an experiment to prove that the proposed complementary learning is capable of training a classi\ufb01er. We train a toy model with 5 convolution layers and 2 MLPs to classify the CIFAR-10 dataset. The baseline is trained with standard ground truth labels. We then turn to BCL, and gradually decrease the number of given negative categories N for each input sample. During the training process, the complementary labels are \ufb01xed. The result is shown in Tab. 1. The performance of BCL is strongly related to N because a small N results in a large uncertainty of the ground truth positive label. However, the model trained with BCL is still capable of learning an effective classi\ufb01er. N=4 N = 6 N = 8 baseline 64.9% 73.5% 78.4% 81.7% Table 1: BCL on CIFAR-10 with known negative labels. N represents how many negative labels of each sample are given. 5.2 TTA Implementation Details We follow the corresponding pre-processing and utilize the pre-trained model as in website1 for CIFAR-10 and website2 1https://github.com/huyvnphan/PyTorch CIFAR10 2https://github.com/weiaicunzai/pytorch-cifar100 \fFigure 3: A brief illustration of the proposed dynamic thresholding strategy. For each category, we \ufb01nd its threshold according to Q. 
We then combine the thresholds and the prediction of each sample to generate their complementary labels to train the model. After that, predictions are utilized to refresh the memory bank. for CIFAR-100. We utilize ResNet-18 for the CIFAR-10, and ResNet-50 for the CIFAR-100, respectively. In the following experiments, we follow the settings in [Wang et al., 2021] and [Wang et al., 2022]. We set the maximum length L of Q to 200 and the percentile p to 75%. We follow [Wang et al., 2021] to set the learning rate to 1e-3 for CIFAR-100-C. For CIFAR-10-C, the learning rate is set to 1e-4. 5.3 Comparison with SOTA Approaches To evaluate the proposed complementary learning, we try two different experiment settings. For the \u201cone at a time (OAAT)\u201d setting, we assume that we only need to face one corruption type. Therefore, after \ufb01netuning the model on each corruption type with severity 5, we refresh the model with the initial pre-trained model on CIFAR-10 and CIFAR-100, respectively. We show the results in Tab. 2 and 3. For the \u201cContinual\u201d setting, we assume that the corruption type is changing with time. In this setting, we don\u2019t refresh the model. We show the results in Tab. 4 and 5. \u201cMean\u201d represents the average performance under the involved corruption types. We bold the best performance under each type of corruption. It is worth noting that we choose the best thresholding strategy for BCL and ECL. A detailed comparison between the strategies could be found in the ablation study. The proposed approach achieves the best performance on both datasets and experiment settings, which demonstrates the effectiveness and robustness of the proposed complementary learning. 5.4 Ablation Study The order of corruption types. To evaluate the in\ufb02uence of corruption types, we randomly shuf\ufb02e the corruption types 5 times and calculate the average performance on CIFAR-10C. Note that we do not include the result in Tab. 4. The result is shown in Tab. 6. The proposed complementary learning shows the consistency of different corruption orders. Shuf\ufb02e Idx 1 2 3 Perf. (%) 80.87 80.92 81.38 Shuf\ufb02e Idx 4 5 Mean Perf. (%) 80.87 81.15 81.04\u00b10.20 Table 6: Performance of CL on shuf\ufb02ed CIFAR-10-C under \u201cContinual\u201d setting. Trained parameters. In all the above experiments, we follow Tent [Wang et al., 2021] to only \ufb01netune the parameters of batch normalization layers. Here we conduct an experiment on the trained parameters in TTA with BCL. We show the result in Tab. 7. \u201dBN\u201d \ufb01netunes batch normalization parameters only, \u201dFeature\u201d \ufb01netunes all model parameters except \ufb01nal FC layers, \u201dClassi\ufb01er\u201d \ufb01netunes \ufb01nal FC layers, and \u201dAll\u201d \ufb01netunes the entire model. Finetune the overall model has similar performance compared with only \ufb01netuning the BN layers. However, only \ufb01netuning the feature extractor or FC layers achieves much worse performance. Param BN Feature Classi\ufb01er All Perf.(%) 82.67 78.29 78.46 82.68 Table 7: Performance of BCL with different \ufb01netuned parameter groups on CIFAR-10-C under OAAT setting: \u201dBN\u201d \ufb01netunes batch normalization parameters only, \u201dFeature\u201d \ufb01netunes all model parameters except \ufb01nal FC layers, \u201dClassi\ufb01er\u201d \ufb01netunes \ufb01nal FC layers, and \u201dAll\u201d \ufb01netunes the entire model. The maximum length of the memory bank Q. 
We conduct an experiment to search for the best length L on all datasets and experiment settings. The result is shown in Tab. 8. Increasing L can slightly improve the performance of BCL generally since it provides a better distribution evaluation of each category. Detailed results for ECL are available in the supplementary material. However, since larger L takes longer time during training, we empirically set it to 200. Dataset Setting L=50 L=200 L=500 CIFAR-10-C OAAT 82.63 82.67 82.70 CIFAR-10-C Continual 79.95 80.50 80.57 CIFAR-100-C OAAT 65.55 65.73 65.76 CIFAR-100-C Continual 61.39 61.48 61.56 Table 8: Search the maximum length for L on different datasets and experiment settings. \fModel contrast gau noise impulse brightness saturate glass defocus spatter speckle elastic Source 19.77 36.87 20.65 87.98 84.63 57.64 65.41 71.33 48.14 77.33 BN 81.43 71.03 53.07 89.08 89.01 64.39 85.26 75.24 68.82 77.74 Tent[Wang et al., 2021] 84.75 76.50 60.04 90.34 90.15 69.38 87.34 79.52 77.17 80.30 CoTTA[Wang et al., 2022] 86.28 76.88 63.72 90.10 89.87 67.85 87.13 79.58 78.15 81.07 BCL(ours) 85.96 77.86 62.55 90.31 90.14 71.74 87.22 80.58 78.77 81.04 ECL(ours) 85.68 79.19 63.71 90.39 90.38 72.52 87.24 80.98 79.84 81.22 Model pixelate fog jpeg motion snow frost shot zoom gau blur Mean Source 69.34 65.35 80.52 69.17 79.99 71.33 45.50 78.24 58.66 62.52 BN 81.08 81.48 79.05 84.22 82.20 79.61 72.39 85.26 84.94 78.17 Tent[Wang et al., 2021] 85.35 85.57 81.51 85.73 84.80 82.32 79.50 88.06 86.80 81.85 CoTTA[Wang et al., 2022] 84.93 86.15 82.25 86.10 85.28 83.45 79.97 87.64 87.19 82.29 BCL(ours) 85.71 86.27 82.12 86.16 85.23 83.65 80.78 87.83 86.88 82.67 ECL(ours) 86.04 86.86 82.30 86.06 85.51 83.93 81.58 88.05 87.13 83.12 Table 2: Comparison with SOTA approaches of OAAT setting on CIFAR-10-C. Model contrast gau noise impulse brightness saturate glass defocus spatter speckle elastic Source 16.28 12.72 7.15 65.23 56.37 46.90 33.81 60.85 15.66 57.91 BN 63.18 41.05 39.63 66.94 66.05 52.97 66.01 64.36 40.90 60.04 Tent[Wang et al., 2021] 66.07 53.26 51.32 71.92 69.87 56.82 69.03 72.16 53.43 63.77 CoTTA[Wang et al., 2022] 63.72 41.31 40.13 67.44 66.09 53.51 66.62 65.02 40.96 60.84 BCL(ours) 69.76 55.33 54.19 73.08 72.26 61.02 71.87 73.35 57.32 65.49 ECL(ours) 69.67 55.58 54.32 73.79 72.17 61.27 71.81 73.54 57.85 65.68 Model pixelate fog jpeg motion snow frost shot zoom gau blur Mean Source 33.42 38.28 48.20 45.08 50.70 43.11 14.57 45.17 26.18 37.77 BN 61.69 56.76 49.23 64.24 55.17 55.77 41.96 66.47 64.52 56.68 Tent[Wang et al., 2021] 67.28 64.69 56.30 67.60 63.12 62.26 55.56 69.75 69.17 63.34 CoTTA[Wang et al., 2022] 62.04 56.87 49.22 64.39 55.36 56.23 42.35 67.31 65.19 57.08 BCL(ours) 69.56 66.66 58.69 69.73 65.03 64.47 57.59 71.46 71.95 65.73 ECL(ours) 68.91 66.61 58.22 69.76 65.60 64.80 58.06 71.61 71.36 65.82 Table 3: Comparison with SOTA approaches of OAAT setting on CIFAR-100-C. 
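(Returning briefly to the memory bank Q evaluated above: the dynamic thresholding of Eqs. (11)-(12) might be implemented roughly as below. The class name is mine, and the exact order of merging the current batch into Q before computing percentiles is an assumption:)

```python
import torch

class ComplementaryThresholder:
    """Illustrative queue-based dynamic thresholding for complementary labels."""

    def __init__(self, num_classes: int, max_len: int = 200, t: float = 75.0):
        self.Q = torch.empty(0, num_classes)  # memory bank of past predictions
        self.max_len = max_len
        self.t = t / 100.0                    # percentile as a fraction

    def step(self, probs: torch.Tensor):
        # probs: (B, C) model outputs D for the current batch
        bank = torch.cat([self.Q, probs], dim=0) if self.Q.numel() else probs
        theta = torch.quantile(bank, self.t, dim=0)    # per-class threshold, Eq. (11)
        comp = (theta - probs).clamp_min(0.0) / theta  # complementary labels, Eq. (12)
        self.Q = bank[-self.max_len:]                  # refresh bank, drop oldest rows
        return comp, theta
```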
Model saturate gau blur glass defocus spatter speckle elastic pixelate contrast gau noise Order \u2212 \u2212 \u2212 \u2212 \u2212 \u2212 \u2212 \u2212 \u2212 \u2212 \u2212 \u2212 \u2212 \u2212 \u2212 \u2212 \u2212 \u2212 \u2212 \u2212 \u2212 \u2212 \u2212 \u2212 \u2212 \u2212 \u2212 \u2212 \u2212 \u2212 \u2212 \u2212 \u2212 \u2212 \u2212 \u2212 \u2192 Source 84.63 58.66 57.64 65.41 71.33 48.14 77.33 69.34 19.77 36.87 BN 89.01 84.94 64.39 85.26 75.24 68.82 77.74 81.08 81.43 71.03 Tent[Wang et al., 2021] 89.95 86.34 68.66 86.44 77.04 72.66 77.80 82.41 77.74 71.28 CoTTA[Wang et al., 2022] 88.08 85.63 71.40 82.83 76.93 74.53 75.79 79.94 82.50 74.52 BCL(ours) 90.04 86.94 70.65 85.82 78.81 75.68 79.92 83.89 81.61 76.00 ECL(ours) 90.05 86.97 70.57 86.42 78.65 76.01 80.22 84.25 81.52 76.34 Model zoom shot impulse fog frost snow motion jpeg brightness Mean Order \u2212 \u2212 \u2212 \u2212 \u2212 \u2212 \u2212 \u2212 \u2212 \u2212 \u2212 \u2212 \u2212 \u2212 \u2212 \u2212 \u2212 \u2212 \u2212 \u2212 \u2212 \u2212 \u2212 \u2212 \u2212 \u2212 \u2212 \u2212 \u2212 \u2212 \u2212 \u2212 \u2212 \u2212 \u2212 \u2212 \u2192 Source 78.24 45.50 20.65 65.35 71.33 79.99 69.17 80.52 87.98 62.51 BN 85.26 72.39 53.07 81.48 79.61 82.20 84.22 79.05 89.08 78.17 Tent[Wang et al., 2021] 82.22 71.29 54.19 75.65 74.43 75.06 75.71 73.80 81.47 76.53 CoTTA[Wang et al., 2022] 81.73 74.72 62.96 69.74 69.95 70.89 71.93 66.15 69.11 75.23 BCL(ours) 86.10 78.90 62.41 80.13 80.42 81.69 82.53 80.43 87.62 80.50 ECL(ours) 86.21 79.03 62.50 79.85 80.30 82.06 82.58 80.66 87.88 80.63 Table 4: Continual test-time adaptation on CIFAR-10-C. \fModel saturate gau blur glass defocus spatter speckle elastic pixelate contrast gau noise Order \u2212 \u2212 \u2212 \u2212 \u2212 \u2212 \u2212 \u2212 \u2212 \u2212 \u2212 \u2212 \u2212 \u2212 \u2212 \u2212 \u2212 \u2212 \u2212 \u2212 \u2212 \u2212 \u2212 \u2212 \u2212 \u2212 \u2212 \u2212 \u2212 \u2212 \u2212 \u2212 \u2212 \u2212 \u2212 \u2212 \u2192 Source 56.37 26.18 46.90 33.81 60.85 15.66 57.91 33.42 16.28 12.72 BN 66.05 64.52 52.97 66.01 64.36 40.90 60.04 61.69 63.18 41.05 Tent[Wang et al., 2021] 71.98 70.99 59.90 71.33 70.80 53.32 62.68 65.93 63.36 51.27 CoTTA[Wang et al., 2022] 66.19 65.20 53.14 66.48 64.95 41.25 60.49 61.98 63.51 41.63 BCL(ours) 71.09 71.04 60.25 70.20 69.23 56.30 63.24 66.09 61.30 54.21 ECL(ours) 70.26 69.93 61.35 72.17 71.59 57.85 66.45 69.09 66.07 56.72 Model zoom shot impulse fog frost snow motion jpeg brightness Mean Order \u2212 \u2212 \u2212 \u2212 \u2212 \u2212 \u2212 \u2212 \u2212 \u2212 \u2212 \u2212 \u2212 \u2212 \u2212 \u2212 \u2212 \u2212 \u2212 \u2212 \u2212 \u2212 \u2212 \u2212 \u2212 \u2212 \u2212 \u2212 \u2212 \u2212 \u2212 \u2212 \u2212 \u2212 \u2212 \u2212 \u2192 Source 45.17 14.57 7.15 38.28 43.11 50.70 45.08 48.20 65.23 37.76 BN 66.47 41.96 39.63 56.76 55.77 55.17 64.24 49.23 66.94 56.68 Tent[Wang et al., 2021] 65.50 51.88 44.60 51.20 52.73 50.22 54.30 45.72 55.52 58.59 CoTTA[Wang et al., 2022] 67.72 42.47 40.37 57.18 56.68 56.04 64.67 50.14 68.03 57.26 BCL(ours) 66.63 56.57 50.05 55.05 58.94 57.06 61.36 54.55 64.96 61.48 ECL(ours) 70.90 59.91 54.17 60.30 63.67 61.25 66.12 58.48 69.68 64.52 Table 5: Continual test-time adaptation on CIFAR-100-C. Comparison with naive pseudo-labeling. We evaluate both ECL and naive pseudo-labeling (NPL) on CIFAR-10-C and CIFAR-100-C under both experiment settings. The result is shown in Tab. 9. 
The proposed ECL outperforms NPL in both settings, which suggests that ECL provides more robust pseudo-supervision for the model to learn from.
Table 9: Comparison between ECL and NPL on both datasets and settings. Performance is averaged over CIFAR-10-C and CIFAR-100-C.
NPL (OAAT): 70.81% | ECL (OAAT): 74.20%
NPL (Continual): 69.38% | ECL (Continual): 72.56%
Comparison of thresholding approaches: To validate the proposed thresholding approach, we compare it with fixed thresholds. For CIFAR-10 and CIFAR-100, we set the fixed threshold to 5e-2 and 5e-3, respectively. The results, shown in Tab. 10, indicate that the proposed dynamic thresholding works well under both the OAAT and Continual settings, while fixed thresholding fails in the Continual setting; the dynamic threshold helps the model optimize smoothly. "Failed" means that, under the "Continual" setting, the model is unable to correctly predict the categories of samples under later corruption types.
Table 10: Comparison between the proposed dynamic thresholding and fixed thresholding (accuracy, %). Columns: CIFAR-10-C (Continual / OAAT), CIFAR-100-C (Continual / OAAT).
BCL-fixed: 45.66 / 82.96, failed / 63.69
BCL-dynamic: 80.50 / 82.67, 61.48 / 65.73
ECL-fixed: 79.76 / 83.12, failed / 65.82
ECL-dynamic: 80.63 / 82.08, 64.52 / 65.42
Validation of batch size: We further validate the effect of different batch sizes on ECL with dynamic thresholding; the results are shown in Fig. 4. When the batch size is small, TTA performance can be significantly affected by it; once the batch size exceeds 64, it becomes less important to the final performance. (Figure 4: The effect of different batch sizes on ECL with dynamic thresholding.)
6 Conclusion In this work, we propose a TTA framework, named complementary learning, that explicitly takes advantage of the non-maximal categories of the input samples. We begin by describing complementary labels and why they are more accurate than pseudo labels. We then give a basic but effective approach to utilizing complementary labels, and we further introduce confidence into the basic formulation to obtain a more effective one. To dynamically adapt the threshold, we propose a simple queue-based approach. Experiments across settings (Continual and OAAT) and datasets (CIFAR-10 and CIFAR-100) show that the proposed approach achieves state-of-the-art performance on test-time adaptation tasks."
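(As a side note to the trained-parameters ablation in Tab. 7: restricting test-time updates to the batch-normalization affine parameters, as in Tent, might look like the following sketch; the helper name is mine:)

```python
import torch
import torch.nn as nn

def bn_parameters(model: nn.Module):
    """Collect only batch-norm affine parameters for test-time optimization."""
    params = []
    for m in model.modules():
        if isinstance(m, (nn.BatchNorm1d, nn.BatchNorm2d)):
            if m.weight is not None:
                params.append(m.weight)
            if m.bias is not None:
                params.append(m.bias)
    return params

# e.g., optimizer = torch.optim.Adam(bn_parameters(model), lr=1e-3)
```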
}
]
},
"edge_feat": {}
}
}
title_31K_G/test_title_long_2404.19696v1.json
ADDED
The diff for this file is too large to render.
See raw diff
title_31K_G/test_title_long_2404.19708v1.json
ADDED
The diff for this file is too large to render.
See raw diff
title_31K_G/test_title_long_2404.19715v1.json
ADDED
The diff for this file is too large to render.
See raw diff
title_31K_G/test_title_long_2404.19739v1.json
ADDED
@@ -0,0 +1,53 @@
{
"url": "http://arxiv.org/abs/2404.19739v1",
"title": "Mixed Continuous and Categorical Flow Matching for 3D De Novo Molecule Generation",
"abstract": "Deep generative models that produce novel molecular structures have the\npotential to facilitate chemical discovery. Diffusion models currently achieve\nstate of the art performance for 3D molecule generation. In this work, we\nexplore the use of flow matching, a recently proposed generative modeling\nframework that generalizes diffusion models, for the task of de novo molecule\ngeneration. Flow matching provides flexibility in model design; however, the\nframework is predicated on the assumption of continuously-valued data. 3D de\nnovo molecule generation requires jointly sampling continuous and categorical\nvariables such as atom position and atom type. We extend the flow matching\nframework to categorical data by constructing flows that are constrained to\nexist on a continuous representation of categorical data known as the\nprobability simplex. We call this extension SimplexFlow. We explore the use of\nSimplexFlow for de novo molecule generation. However, we find that, in\npractice, a simpler approach that makes no accommodations for the categorical\nnature of the data yields equivalent or superior performance. As a result of\nthese experiments, we present FlowMol, a flow matching model for 3D de novo\ngenerative model that achieves improved performance over prior flow matching\nmethods, and we raise important questions about the design of prior\ndistributions for achieving strong performance in flow matching models. Code\nand trained models for reproducing this work are available at\nhttps://github.com/dunni3/FlowMol",
"authors": "Ian Dunn, David Ryan Koes",
"published": "2024-04-30",
"updated": "2024-04-30",
"primary_cat": "q-bio.BM",
"cats": [
"q-bio.BM",
"cs.LG"
],
"label": "Original Paper",
"paper_cat": "Diffusion AND Model",
"gt": "Mixed Continuous and Categorical Flow Matching for 3D De Novo Molecule Generation",
"main_content": "Introduction Deep generative models that can directly sample molecular structures with desired properties have the potential to accelerate chemical discovery by reducing or eliminating the need to engage in resource-intensive screening-based based discovery paradigms. Moreover, generative models may also improve chemical discovery by enabling multi-objective design of chemical matter. In pursuit of this idea, there has been recent interest in developing generative models for the design of small-molecule therapeutics [1\u20138], proteins [9\u201311], and materials [12]. State of the art performance in these tasks is presently achieved by applying diffusion models [13\u201315] to point cloud representations of molecular structures. Flow matching, a recently proposed generative modeling framework [16\u201319], generalizes diffusion models. Under diffusion models, the transformation of prior samples to data is formulated as a reversal of a predefined forward process. The forward process is a Markov chain or differential equation that must converge to a tractable stationary distribution as t \u2192\u221e; this requirement constrains the viable options for forward/reverse processes and prior distributions. In contrast, flow matching prescribes a method for directly learning a differential equation that maps samples from nearly arbitrary distributions. In doing so, flow matching permits valuable flexibility when designing models for specific applications. For example, Jing et al. [20] and St\u00e4rk et al. [21] make use of the fact that flow matching allows arbitrary prior distributions to design models whose priors are closer to realistic 3D molecular conformations than a Gaussian prior. In this work we explore the application of flow matching to 3D de novo small molecule generation. We adapt the approach of state of the art diffusion models for this task [22\u201324] to the flow matching framework. This approach entails predicting atom positions, atom types (chemical elements), formal charges, and bond orders between all pairs arXiv:2404.19739v1 [q-bio.BM] 30 Apr 2024 \fFigure 1: Overview of FlowMol Top: We adapt the flow matching framework for unconditional 3D molecule generation. An ordinary differential equation parameterized by a graph neural network transforms a prior distribution over atom positions, types, charges, and bond orders to the distribution of valid molecules. Black arrows show the instantaneous direction of the ODE on atom positions. Middle: Trajectory of the atom type vector for a single atom under SimplexFlow, a variant of flow matching developed for categorical variables. Atom type flows lie on the probability simplex. Bottom: Trajectory of an atom type vector starting from a Gaussian prior. This approach does not respect the categorical nature of the data; however, we find it yields superior performance to SimplexFlow. of atoms. All of these variables are categorical with the exception of atom positions. Therefore, molecule generation requires sampling from a joint distribution of continuous and categorical variables. Effectively adapting flow matching for this mixed continuous/categorical generative task may be non-trivial because the flow matching framework is predicated on the assumption of continuously valued data. In this work, we extend the flow matching framework to categorical data by constructing flows that are constrained to exist on a continuous representation of categorical data known as the probability simplex. 
We call this extension SimplexFlow. We present a model for de novo small-molecule generation that uses SimplexFlows to generate categorical features. This work was motivated by the intuition that designing a generative process that respects the categorical nature of the data it operates on may yield improved performance; however, our empirical results contradict this intuition. We show that in practice, a simpler approach that makes no accommodations for the categorical nature of the data yields superior performance to a de novo model using SimplexFlow. Our final flow matching model for molecule generation, FlowMol, achieves improved performance over existing flow matching methods for molecule generation and is competitive with state of the art diffusion models while exhibiting a >10-fold reduction in inference time. 2 Background 2.1 Discrete Diffusion The original formulation of diffusion models [13] was defined in terms of a Markov chain of random variables that converged to a tractable stationary distribution in the limit of an infinite number of steps in the Markov chain. This formulation made no assumptions about the sample space of the random variables modeled, allowing for natural extensions to discrete data [25\u201327]. A separate formulation of diffusion models as continuous-time stochastic differential equations (SDE)[15] became popular in the literature. The SDE formulation of diffusion models is dependent on the assumption of having continuously-valued data. Similar to our approach, there is a line of work developing SDE-based diffusion models that operate on continuous representations of discrete data. Several works developed diffusion models where diffusion trajectories were constrained to the simplex [28\u201330]. An alternative approach is to embed categorical features into a continuous latent space and train diffusion models on the embeddings [31]. 2 \f2.2 De Novo Molecule Generation Initial attempts at de novo molecule generation focused on generating either textual representations (SMILES strings) [32\u201334] or 2D molecular graphs [35\u201338]: molecular representations that exclude all information about 3D structure. Subsequent approaches were developed for 3D molecule generation using a variety of molecular representations and generative paradigms [39\u201343]. Hoogeboom et al. [44] proposed the first diffusion model for 3D molecule generation, which yielded superior performance over previous approaches. Molecules are represented in Hoogeboom et al. [44] by attributed point clouds where each atom has a position in space and type. A continuous diffusion process is defined for both atom positions and types where the prior for both is a standard Gaussian distribution. A purported weakness of this approach is that atom connectivity is not predicted by the model and must be inferred in a post-processing step. Several concurrent works sought to address these issues by predicting bond order in addition to atom positions/types: Huang et al. [22], Vignac et al. [23], Peng et al. [24], Hua et al. [45]. These models report substantially improved performance over Hoogeboom et al. [44]. Three of these four concurrent works (Vignac et al. [23], Peng et al. [24], Hua et al. [45]) use discrete diffusion processes for categorical features and attribute (in part) their improved model performance to the use of discrete diffusion.; however, only Peng et al. [24] presents an ablation study isolating the effect of discrete diffusion. Moreover, Huang et al. 
[22] uses only continuous diffusion processes and reports superior performance. This suggests that while predicting graph connectivity provides performance benefits, the utility of discrete diffusion for molecule generation is less clear. Vignac et al. [23] and Huang et al. [22] fully specify the molecular structure by also predicting atom formal charges and the presence of hydrogen atoms; for this reason, these works are the most similar to the model presented here. 2.3 Flow-Matching for De Novo Molecule Generation To our knowledge, Song et al. [46] is the only existing work that performs de novo molecule generation with flow matching. Molecules are represented as point clouds where each atom has a position in space and an atom type. The final molecule structure is inferred after the inference procedure. The prior distribution for atom type vectors is a standard Gaussian distribution, and so the generative process does not have any inductive biases to respect the discrete nature of the data. This work can be viewed as the flow matching analog of Hoogeboom et al. [44]. 2.4 Flow Matching for Discrete Data Concurrent work [47] developed a variant of flow matching on the simplex which we refer to as Dirichlet Flows. In Dirichlet Flows, conditional probability paths are only conditioned on x1 and, as a result, do not permit arbitrary choices of the prior and must use a uniform distribution over the simplex. In contrast, our formulation permits the use of any prior distribution. Stark et al. [47] identify problems with the choice of commonly used conditional vector fields that limit performance on variables with a large number of categories. They propose an alternative choice of conditional probability paths that alleviate this issue. There are also other works which develop flow matching variants for discrete data. Boll et al. [48] equip the simplex with the Fisher-Rao metric to form a Riemannian manifold, and apply Riemannian Flow Matching [49] to this manifold. Campbell et al. [50] develop a flow matching method for discrete data built on continuous-time Markov chains. Importantly, none of the aforementioned works, which present methods for training flow matching models for categorical data, benchmark their model performance against simpler flow matching models that do not account for the categorical nature of their data. 2.5 Flow Matching Flow matching [16\u201319] is a new generative modeling framework that generalizes diffusion models. Flow matching permits useful design flexibility in the choice of prior of and nature of the map between two distributions. Flow matching is also conceptually simpler than diffusion and permits substantially faster inference. We briefly describe the flow matching framework here. An ordinary differential equation (ODE) that exists on Rd is defined by a smooth, time-dependent vector-field u(x, t) : Rd \u00d7 [0, 1] \u2192Rd. dx dt = u(x, t) (1) 3 \fNote that we only consider this ODE on the time interval [0, 1]. For simplicity we will use ut(x) interchangeably with u(x, t). Given a probability distribution over initial positions x0 \u223cp0(x), the ODE (1) induces time dependent probability distributions pt(x). The objective in flow matching is to approximate a vector field ut(x) that pushes a source distribution p0(x) to a desired target distribution p1(x). A neural network u\u03b8 can be regressed to the vector field ut by minimizing the Flow Matching loss. 
L_FM = E_{x∼p_t} ||u_θ(x, t) − u_t(x)||² (2)
Computing L_FM requires access to u_t and p_t, quantities that are typically intractable. Flow matching provides a method for approximating u_t(x) without having access to it. If we consider the probability path p_t(x) to be a mixture of conditional probability paths p_t(x|z):
p_t(x) = ∫ p_t(x|z) p(z) dz (3)
and we know the form of the conditional vector fields u_t(x|z) that produce p_t(x|z), then the marginal vector field u_t(x) can be defined as a mixture of conditional vector fields:
u_t(x) = E_{p(z)} [ u_t(x|z) p_t(x|z) / p_t(x) ] (4)
We still cannot compute u_t(x), but the neural network u_θ that minimizes L_FM is also the minimizer of the Conditional Flow Matching (CFM) loss defined in (5):
L_CFM = E_{z∼p(z), x∼p_t(x|z), t∼U(0,1)} ||u_θ(x, t) − u_t(x|z)||² (5)
That is, regressing to conditional vector fields is, in expectation, equivalent to regressing to the marginal vector field. The remaining design choices for a flow matching model are the choice of conditioning variable z, conditional probability paths p_t(x|z), and conditional vector fields u_t(x|z). 3 Methods 3.1 Problem Setting We represent a molecule with N atoms as a fully-connected graph. Each atom is a node in the graph. Every atom has a position in space X = {x_i}_{i=1}^N ∈ R^{N×3}, an atom type (in this case the atomic element) A = {a_i}_{i=1}^N ∈ R^{N×n_a}, and a formal charge C = {c_i}_{i=1}^N ∈ R^{N×n_c}. Additionally, every pair of atoms has a bond order E = {e_ij : ∀ i, j ∈ [N], i ≠ j} ∈ R^{(N²−N)×n_e}, where n_a, n_c, n_e are the numbers of possible atom types, charges, and bond orders; these are categorical variables represented by one-hot vectors. For brevity, we denote a molecule by the symbol g, which can be thought of as a tuple of the constituent data types g = (X, A, C, E). There is no closed-form expression or analytical technique for sampling the distribution of realistic molecules p(g). We seek to train a flow matching model to sample this distribution. Concretely, we choose the target distribution to be the distribution of valid 3D molecules, p_1(g) = p(g). Our choice of prior p_0(g) is described in Section 3.5. Our strategy for adapting flow matching to molecular structure mimics prior work on applying diffusion and flow-based generative models to molecular structure: we define conditional vector fields and conditional probability paths for each data modality and jointly regress one neural network for all data modalities. Our total loss is a weighted combination of CFM losses from (5):
L = η_X L_X + η_A L_A + η_C L_C + η_E L_E (6)
where (η_X, η_A, η_C, η_E) are scalars weighting the relative contribution of each loss term. We set these values to (3, 0.4, 1, 2) as was done in Vignac et al. [23]. Our specific choice of conditional vector fields and probability paths is described in Section 3.2. In practice, we use a variant of the CFM objective called the endpoint-parameterized objective, which we present in Section 3.3. These choices are in turn used to design SimplexFlow, our method for performing flow matching on categorical variables, described in Section 3.4. 3.2 Flow Matching with Temporally Non-Linear Interpolants We choose the conditioning variable to be the initial and final states of a trajectory: z = (g_0, g_1).
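(Pausing on the generic recipe: with a straight-line interpolant and z = (x0, x1), one CFM training step for a continuous feature can be sketched as follows; the model signature is illustrative, not the paper's code:)

```python
import torch

def cfm_step(model, x0, x1):
    """One conditional flow matching step (Eq. 5) with a linear interpolant.

    x0: (B, D) prior samples; x1: (B, D) data samples; z = (x0, x1).
    """
    t = torch.rand(x0.size(0), 1)             # t ~ U(0, 1)
    xt = (1.0 - t) * x0 + t * x1              # sample from p_t(x | z)
    target = x1 - x0                          # conditional vector field u_t(x | z)
    loss = ((model(xt, t) - target) ** 2).mean()
    return loss
```

The section that follows replaces the linear interpolant with a per-modality schedule α_t, which changes the target field accordingly.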
We choose the conditional probability path to be a Dirac density placed on a "straight" line connecting these states: p_t(g|g_0, g_1) = δ(g − (1 − α_t)g_0 − α_t g_1). This particular choice of conditional vector fields and probability paths gives us the freedom to choose any prior distribution p_0(g) [17, 18, 36]. Our choice of p_t(g|g_0, g_1) is equivalent to defining a deterministic interpolant:
g_t = (1 − α_t) g_0 + α_t g_1 (7)
where α_t : [0, 1] → [0, 1] is a function that takes t as input and returns a value between 0 and 1. The rate at which a molecule from the prior distribution g_0 is transformed into a valid molecule g_1 can be controlled by the choice of α_t, which we name the "interpolant schedule" (intended to be analogous to noise schedules for diffusion models). We define separate interpolant schedules for each data type comprising a molecule: α_t = (α_t^X, α_t^A, α_t^C, α_t^E). Taking inspiration from Vignac et al. [23], we define a cosine interpolant schedule:
α_t = 1 − cos²((π/2) t^ν) (8)
where different values of ν are set for atom positions, types, charges, and bond orders. The interpolant (7) gives rise to conditional vector fields of the form:
u(g_t|g_0, g_1) = α′_t (g_1 − g_0) (9)
where α′_t is the time derivative of α_t. 3.3 Endpoint Parameterization By solving (7) for g_0 and substituting this expression into (9), we obtain an alternate form of the conditional vector field:
u(g_t|g_0, g_1) = (α′_t / (1 − α_t)) (g_1 − g_t) (10)
As described in Section 2.5, the typical flow matching procedure is to regress a neural network u_θ(g_t) directly onto conditional vector fields by minimizing the CFM loss (5). Instead, we apply a reparameterization initially proposed by Jing et al. [20]:
u_θ(g_t) = (α′_t / (1 − α_t)) (ĝ_1(g_t) − g_t) (11)
By substituting (11) and (10) into (5), we obtain our endpoint-parameterized objective:
L_EP = E_{t, g_t} [ (α′_t / (1 − α_t)) ||ĝ_1(g_t) − g_1|| ] (12)
Our objective therefore becomes to train a neural network that predicts valid molecular structures ĝ_1(g_t) given samples from a conditional probability path. This is particularly advantageous when operating on categorical data, as placing a softmax layer on model outputs constrains their domain to the simplex. Empirically, we find that the endpoint objective yields better performance than the vector field regression objective (5) for the task of molecule generation. Moreover, we leverage the theoretical guarantee that the predicted endpoints for categorical data lie on the simplex to ensure our flows lie on the simplex. In practice, the interpolant-dependent loss weight α′_t / (1 − α_t) produces unreasonably large values as α_t → 1. We replace this term with a time-dependent loss weight inspired by Le et al. [51]: w(t) = min(max(0.005, α_t / (1 − α_t)), 1.5). For categorical variables we use a cross-entropy loss rather than the L2 norm shown in (12). 3.4 SimplexFlow To design flow matching for categorical data, our strategy is to define a continuous representation of categorical variables and then construct a flow matching model whose flows are constrained to that representation. We choose the d-dimensional probability simplex S_d as the continuous representation of a d-categorical variable.
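(Before the simplex construction: the cosine schedule of Eq. (8), its analytic time derivative, and the endpoint-parameterized field of Eq. (11) fit in a few lines; the epsilon guard near α_t → 1 is my addition:)

```python
import math
import torch

def alpha(t: torch.Tensor, nu: float = 1.0) -> torch.Tensor:
    """Cosine interpolant schedule, Eq. (8): 1 - cos^2((pi/2) * t^nu)."""
    return 1.0 - torch.cos(0.5 * math.pi * t.pow(nu)).pow(2)

def alpha_prime(t: torch.Tensor, nu: float = 1.0) -> torch.Tensor:
    """Time derivative of Eq. (8): (pi/2) * nu * t^(nu-1) * sin(pi * t^nu)."""
    return 0.5 * math.pi * nu * t.pow(nu - 1.0) * torch.sin(math.pi * t.pow(nu))

def endpoint_field(g_t, g1_hat, t, nu: float = 1.0, eps: float = 1e-8):
    """Endpoint-parameterized vector field, Eq. (11)."""
    return alpha_prime(t, nu) / (1.0 - alpha(t, nu) + eps) * (g1_hat - g_t)
```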
Sd = \b x \u2208Rd|xi > 0, 1 \u00b7 x = 1 \t (13) A d-categorical variable x1 \u2208{1, 2, . . . , d} can be converted to a point on Sd via one-hot encoding. Correspondingly, the categorical distribution p1(x) = C(q) can be converted to a distribution on Sd as: p1(x) = d X i=1 qi\u03b4(x \u2212ei) (14) where ei is the ith vertex of the simplex and qi is the probability of x belonging to the ith category. If we choose a prior distribution p0(x) such that supp(p0) = Sd, then all conditional probability paths produced by the interpolant (7) will lie on the simplex. This is because the simplex is closed under linear interpolation (see Appendix A) and the conditional trajectories are obtained by linearly interpolating between two points on the simplex (x0, x1 \u2208Sd). Although choosing (p0, p1) with support on the simplex results in conditional trajectories on the simplex, training a flow under the vector field objective (5) provides no guarantee that trajectories produced by the learned vector field lie on the simplex. However, training a flow matching model under the endpoint parameterization (Section 3.3) enables us to guarantee by construction that generated flows lie on the simplex; proof of this is provided in Appendix B. 3.5 Priors We define the prior distribution for a molecule as a composition of independent samples for each atom and pair of atoms. Our prior distributions take the form: p0(g) = p0(x, a, c, e) = Natoms Y i=1 p0(xi)p0(ai)p0(ci) \u00d7 Natoms Y i,j<i p0(eij) (15) Our choice of conditional trajectory (7) permits the choice of any prior distribution. SimplexFlow places the constraint that the prior distribution for categorical variables have support bounded to the simplex. We always set p0(xi) = N(xi|0, I); atom positions are independently sampled from a standard Gaussian distribution. We explore the use of several prior distributions for categorical variables ai, ci, eij. We experiment with three different categorical priors for SimplexFlow. The uniform-simplex prior is a uniform distribution over the simplex; the simplest choice for a categorical prior. This choice is analogous to the \u201cLinear FM\u201d model described in [47]. The marginalsimplex prior is designed to be \u201ccloser\u201d to the data distribution by using marginal distributions observed in the training data. Specifically, we replace p0(ai)p0(ci) and p0(eij) in (15) with p1(ai, ci) and p1(eij), respectively. Finally, for the barycenter prior, categorical variables are placed at the barycenter of the simplex; the point in the center of the simplex assigning equal probability to all categories. The intuition behind the barycenter prior is all categorical variables will be \u201cundecided\u201d at t = 0. In practice, the model fails when the prior distributions for categorical variables only have density on a small, fixed number of points on the simplex; this is the case for the marginal-simplex and barycenter priors. We find that \"blurring\" the prior samples for categorical variables significantly improves performance. That is, Gaussian noise is added to the samples before they are projected back onto the simplex. 3.6 Optimal Transport Alignment Previous work [17] has shown that aligning prior and target samples via optimal transport significantly improves the performance of flow matching by minimizing the extent to which conditional trajectories intersect. 
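(Stepping back to the priors of Eq. (15): the uniform-simplex and barycenter priors, with the "blurring" trick, might be sampled roughly as below. The renormalization is a crude stand-in for a proper simplex projection, and the marginal-simplex prior, which samples categories at training-set frequencies, is omitted:)

```python
import torch

def categorical_prior(n: int, d: int, kind: str = "uniform", blur: float = 0.0) -> torch.Tensor:
    """Sample n points on the d-simplex for two of the categorical priors (sketch)."""
    if kind == "uniform":
        # Dirichlet(1, ..., 1) is the uniform distribution over the simplex
        x = torch.distributions.Dirichlet(torch.ones(d)).sample((n,))
    else:
        # barycenter: every category equally "undecided" at t = 0
        x = torch.full((n, d), 1.0 / d)
    if blur > 0.0:
        # "blurring" the prior: add Gaussian noise, then return to the simplex
        x = (x + blur * torch.randn_like(x)).clamp_min(1e-6)
        x = x / x.sum(dim=-1, keepdim=True)
    return x
```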
3.6 Optimal Transport Alignment Previous work [17] has shown that aligning prior and target samples via optimal transport significantly improves the performance of flow matching by minimizing the extent to which conditional trajectories intersect. When performing flow matching on molecular structure, this consists of computing the optimal permutation of the node ordering and the rigid-body alignment of atom positions [46, 52]. We apply this alignment between target and prior positions at training time. This also ensures that prior positions $p_0(X)$ and target positions $p_1(X)$ effectively exist in the center-of-mass-free subspace proposed in Hoogeboom et al. [44], which renders the target density $p_1(g)$ invariant to translations. Figure 2: FlowMol Architecture. Top left: An input molecular graph $g_t$ is transformed into a predicted final molecular graph $g_1$ by being passed through multiple molecule update blocks. Top right: A molecule update block uses NFU, NPU, and EFU sub-components to update all molecular features. Bottom: Update equations for graph features; $\phi$ and $\psi$ are used to denote MLPs and GVPs, respectively. 3.7 Model Architecture Molecules are treated as fully-connected graphs. The model is designed to accept a sample $g_t$ and predict the final destination molecule $g_1$. Within the neural network, molecular features are grouped into node positions, node scalar features, node vector features, and edge features. Node positions are identical to the atom positions discussed in Section 3.1. Node scalar features are a concatenation of atom type and atom charge. Node vector features are geometric vectors (vectors with rotation order 1) that are relative to the node position; they are initialized to zero vectors. Molecular features are iteratively updated by passing $g_t$ through several Molecule Update Blocks. A Molecule Update Block uses Geometric Vector Perceptrons (GVPs) [53] to handle vector features, and is composed of three components: a node feature update (NFU), a node position update (NPU), and an edge feature update (EFU). The NFU uses a message-passing graph convolution to update node features; the NPU and EFU blocks are node-wise and edge-wise operations, respectively. Following several molecule update blocks, predictions of the final categorical features ($\hat{A}_1, \hat{C}_1, \hat{E}_1$) are generated by passing node and edge features through shallow node-wise and edge-wise multilayer perceptrons (MLPs). For models using the endpoint parameterization, these MLPs include softmax activations. The model architecture is visualized in Figure 2 and explained in detail in Appendix D. In practice, graphs are directed: for every pair of atoms $i, j$ there exist edges in both directions, $i \to j$ and $j \to i$. When predicting the final bond orders $\hat{E}_1$ for an edge, we ensure that one prediction is made per pair of atoms and that this prediction is invariant to permutations of the atom indexing. This is accomplished by making our prediction from the sum of the learned bond features; that is, $\hat{e}^{ij}_1 = \mathrm{MLP}(e_{ij} + e_{ji})$. GVPs, as originally designed, predict vector quantities that are E(3)-equivariant. We introduce a variant of GVP that is made SE(3)-equivariant by the addition of cross product operations. The cross product is equivariant to rotations and translations of its input vectors but not to reflections. As a result, the learned density $p_1(g)$ is invariant to rotations and translations but not reflections; in other words, FlowMol is sensitive to chirality. Empirically, we find that the addition of cross product operations to GVP improves performance. Schneuing et al. [3] proposed the addition of a cross product operation to the EGNN architecture [54]; we adopt this idea for GVP. We refer the reader to Appendix F of Schneuing et al. [3] for a detailed discussion of the equivariance of cross products. Our cross product variant of GVP is described in Appendix D.1.
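A minimal sketch of the direction-invariant bond-order readout described above; the hidden width, activation, and layer count are assumptions, with only the symmetric sum $e_{ij} + e_{ji}$ and the softmax for endpoint models taken from the text.

```python
import torch
import torch.nn as nn

class BondOrderHead(nn.Module):
    """One bond-order prediction per atom pair, invariant to edge direction,
    via e_hat_ij = MLP(e_ij + e_ji)."""

    def __init__(self, edge_dim: int, n_bond_orders: int, endpoint_param: bool = True):
        super().__init__()
        layers = [nn.Linear(edge_dim, edge_dim), nn.SiLU(),
                  nn.Linear(edge_dim, n_bond_orders)]
        if endpoint_param:
            # Endpoint-parameterized models predict a point on the simplex.
            layers.append(nn.Softmax(dim=-1))
        self.mlp = nn.Sequential(*layers)

    def forward(self, e_ij: torch.Tensor, e_ji: torch.Tensor) -> torch.Tensor:
        # Summing the two directed edge features makes the output invariant
        # to swapping atom indices i and j.
        return self.mlp(e_ij + e_ji)
```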
4 Experiments 4.1 Datasets We train on QM9 [55, 56] and GEOM-Drugs [57] using explicit hydrogens. QM9 contains 124k small molecules, each with one 3D conformation. GEOM-Drugs contains approximately 300k larger, drug-like molecules with multiple conformers per molecule. Molecules in QM9 have an average of 18 atoms and a maximum of 29, while those in GEOM-Drugs have an average of 44 atoms and a maximum of 181. We use the same dataset splits as Vignac et al. [23]. We chose to use explicit hydrogens because it makes for a more difficult learning task. By predicting explicit hydrogens in combination with atom types, bond orders, and formal charges, there is a 1-to-1 mapping from model outputs to molecules. If any one of these components were removed from the generative task, one model output could plausibly be interpreted as multiple molecular structures, and so it would be “easier” for a model output to be deemed “correct” or “valid.” We view the task of predicting graph topology and structure with explicit hydrogens and formal charges as the most rigorous evaluation of the capability of generative models to fit the distribution of valid molecular structures. 4.2 Model Evaluation We report three metrics measuring the validity of generated molecular topology: percent atoms stable, percent molecules stable, and percent molecules valid. An atom is defined as “stable” if it has a valid valency. Atomic valency is defined as the sum of the orders of the bonds in which an atom participates; aromatic bonds are assigned a bond order of 1.5. A valid valency is any valency observed in the training data for atoms of a given element and formal charge. A molecule is counted as stable if all of its constituent atoms are stable. A molecule is considered “valid” if it can be sanitized by rdkit [58] using default sanitization settings. Metrics regarding the validity of molecular topology fail to capture a model's ability to reproduce reasonable molecular geometries. Therefore, we also compute the Jensen-Shannon divergence between the distribution of potential energies of molecules in the training data and that of molecules sampled from trained models. Potential energies are obtained from the Merck Molecular Mechanics Force Field implemented in rdkit [58]. Force-field energies cannot be obtained for molecules that cannot be sanitized by rdkit, and so the reported Jensen-Shannon divergences are for valid molecules only. Molecule quality metrics are reported for samples of 10,000 molecules, repeated 5 times. We report inference time for FlowMol and the baseline models, measured as the time required to generate one batch of 100 molecules on the same NVIDIA GeForce RTX 2060 GPU; this inference procedure is also repeated five times. Inference is run on FlowMol using Euler integration with 100 evenly-spaced timesteps. All results are reported with 95% confidence intervals. For all samplings, the number of atoms in each molecule is sampled from the distribution of atom counts observed in the training data.
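A sketch of how the three topology metrics could be computed with rdkit, under the stated definitions; building the `allowed_valencies` lookup from the training set is assumed to happen elsewhere, and the function is illustrative rather than the authors' evaluation code.

```python
from rdkit import Chem

def topology_metrics(mols, allowed_valencies):
    """allowed_valencies: dict mapping (element symbol, formal charge) -> set of
    valencies observed in the training data (aromatic bonds count as 1.5)."""
    n_atoms = n_stable_atoms = n_stable_mols = n_valid = 0
    for mol in mols:
        atom_ok = []
        for atom in mol.GetAtoms():
            # Valency = sum of bond orders the atom participates in.
            valency = sum(b.GetBondTypeAsDouble() for b in atom.GetBonds())
            key = (atom.GetSymbol(), atom.GetFormalCharge())
            atom_ok.append(valency in allowed_valencies.get(key, set()))
        n_atoms += len(atom_ok)
        n_stable_atoms += sum(atom_ok)
        n_stable_mols += all(atom_ok)  # molecule stable iff every atom is stable
        try:
            Chem.SanitizeMol(Chem.Mol(mol))  # default sanitization settings
            n_valid += 1
        except Exception:
            pass
    return {"atoms_stable": n_stable_atoms / n_atoms,
            "mols_stable": n_stable_mols / len(mols),
            "mols_valid": n_valid / len(mols)}
```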
4.3 Model Ablations We train multiple versions of our model to evaluate the effects of several of the aforementioned design choices. To observe the effect of the endpoint reparameterization (Section 3.3), we train equivalent models with both the vector-field objective (5) and the endpoint objective (12). We train models using SimplexFlow with all three categorical priors proposed in Section 3.5, which have support on the simplex. To determine whether SimplexFlow improves performance, we also train models where the prior distribution for categorical features is a standard Gaussian distribution. In this setting, the generated flows are not constrained to the simplex, and it can be said that the flows do not “respect” the categorical nature of the data; this is similar to the atom type flows in Song et al. [46] and the atom type diffusion in Hoogeboom et al. [44]. All of the mentioned model ablations are tested on the QM9 dataset and the results are presented in Section 5.1. A subset of these ablations was also performed on the GEOM dataset; GEOM ablations are available in Appendix E, and none of the effects observed there contradict those seen in the QM9 ablations. For metrics reported in ablations, results are averaged over two identical models trained with different random seeds. 4.4 Comparison to Dirichlet Flows We compare SimplexFlow to concurrent work that developed Dirichlet Flows [47] for flow matching on the simplex. Briefly, for a d-categorical variable $x$ represented as a point on the simplex, the conditional probability path is $p_t(x|x_1 = e_i) = \mathrm{Dir}(x \mid \gamma = 1 + e_i \omega)$ (16), where Dir is a Dirichlet distribution parameterized by $\gamma$ and $\omega$ represents time. The Dirichlet conditional flow must start at $\omega = 1$ and only converges to $\delta(x - e_i)$ in the limit $\omega \to \infty$. In order to incorporate Dirichlet flows into our model, we define the relation $\omega_t = \omega_{max}\alpha_t + 1$, where $\alpha_t$ is defined by (8). Dirichlet flow matching necessitates the use of a uniform prior over the simplex for categorical variables, and so we do not experiment with the other simplex priors described in Section 3.5.

Table 1: FlowMol ablations on QM9 with explicit hydrogens

| Flow Type | Categorical Prior | Atoms Stable (%) (↑) | Mols Stable (%) (↑) | Mols Valid (%) (↑) | JS(E) (↓) |
| --- | --- | --- | --- | --- | --- |
| Dirichlet | uniform-simplex | 98.4±0.0 | 80.0±0.3 | 85.5±0.3 | 0.15±0.01 |
| endpoint | uniform-simplex | 98.9±0.1 | 84.2±0.9 | 88.9±0.6 | 0.11±0.01 |
| endpoint | marginal-simplex | 99.5±0.1 | 91.9±0.7 | 96.1±0.2 | 0.06±0.00 |
| endpoint | barycenter | 99.5±0.0 | 91.4±0.5 | 93.6±0.5 | 0.05±0.00 |
| endpoint | Gaussian | 99.7±0.0 | 96.0±0.1 | 96.9±0.1 | 0.09±0.01 |
| vector-field | marginal-simplex | 98.6±0.0 | 79.4±0.3 | 86.2±0.3 | 0.07±0.00 |
| vector-field | Gaussian | 99.5±0.0 | 93.6±0.7 | 94.7±0.7 | 0.08±0.01 |

4.5 Baselines We compare FlowMol to three baselines: MiDi [23], JODO [22], and EquiFM [46]. MiDi and JODO perform the same generation task as FlowMol: predicting atom positions, atom types, formal charges, and bond orders; the key difference is that MiDi and JODO are diffusion models. EquiFM, as described in Section 2.3, is a flow matching model for de novo molecule generation; however, the model does not predict bond orders or atomic charges. We do not report the performance of EquiFM on the GEOM dataset because the authors have not released a model checkpoint.
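To make the comparison concrete, a small sketch of sampling from the Dirichlet conditional path (16) with the time mapping $\omega_t = \omega_{max}\alpha_t + 1$ from Section 4.4 above; the batch layout and the $\omega_{max}$ value are assumptions of this sketch.

```python
import torch

def sample_dirichlet_path(x1_idx: torch.Tensor, d: int, alpha_t: torch.Tensor,
                          omega_max: float = 100.0) -> torch.Tensor:
    """Sample x_t ~ Dir(gamma = 1 + e_i * omega) as in Eq. (16), where i is the
    true category index x1_idx (batch of longs) and omega_t = omega_max * alpha_t + 1.
    omega_max = 100.0 is an illustrative value, not the paper's setting."""
    omega = omega_max * alpha_t + 1.0                  # (batch,)
    conc = torch.ones(x1_idx.shape[0], d)              # gamma = 1 everywhere...
    conc.scatter_add_(1, x1_idx.unsqueeze(1),
                      omega.unsqueeze(1))              # ...plus omega on the true category
    return torch.distributions.Dirichlet(conc).sample()
```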
5 Results 5.1 Model Ablations Results of the model ablation experiments on the QM9 dataset are shown in Table 1. Most notably, models that use SimplexFlow for categorical variables (those with categorical priors constrained to the simplex) consistently underperform models with Gaussian categorical priors. The best-performing SimplexFlow model (endpoint parameterization, marginal-simplex prior) achieves 96.1% valid molecules, while an equivalent model using a Gaussian prior achieves 96.9% valid molecules. Models trained under the endpoint objective achieve superior performance to otherwise identical models trained under the vector-field objective. For example, Table 1 shows that a model trained with a marginal-simplex categorical prior obtains 79% stable molecules under the vector-field objective and 92% stable molecules under the endpoint objective. This effect is also observed with models using a Gaussian categorical prior, but to a lesser extent. We find that models using Dirichlet conditional probability paths [47] yield approximately equivalent performance to the conditional probability path (7) with a uniform-simplex categorical prior. Among models satisfying the constraints of SimplexFlow (Section 3.4), the uniform-simplex prior yielded the worst performance, while the marginal-simplex and barycenter priors yield approximately equivalent performance. Although the models using marginal-simplex and barycenter priors produce relatively fewer valid molecules, the molecules generated by these models exhibit the lowest Jensen-Shannon divergence to the energy distribution of the training data. 5.2 Comparison with Baselines FlowMol achieves superior performance to EquiFM [46] on QM9; for example, it produces 3% more valid molecules while having equivalent divergence to the training data energy distribution. FlowMol approaches the performance of the diffusion baselines (JODO, MiDi) on QM9 but does not perform as well on the GEOM-Drugs dataset. The fact that fewer generated molecules are valid on GEOM-Drugs cannot be attributed solely to the difference in molecule sizes between the two datasets, because FlowMol's atom-level stability is also worse for GEOM-Drugs than for QM9 (99.0% on GEOM vs 99.7% on QM9). Despite the fact that MiDi and FlowMol achieve equivalent atom-level stability on GEOM-Drugs (99.0%), MiDi produces significantly more topologically correct molecules: FlowMol achieves 68% stable molecules while MiDi achieves 85%.

Table 2: Comparison of FlowMol to baseline models on the QM9 and GEOM-Drugs datasets

| Model | Dataset | Atoms Stable (%) (↑) | Mols Stable (%) (↑) | Mols Valid (%) (↑) | JS(E) (↓) | Inference Time (s) (↓) |
| --- | --- | --- | --- | --- | --- | --- |
| JODO [22] | QM9 | 99.9±0.0 | 98.7±0.2 | 98.9±0.2 | 0.12±0.01 | 116±2 |
| MiDi [23] | QM9 | 99.8±0.0 | 97.5±0.1 | 98.0±0.2 | 0.05±0.00 | 89±7 |
| EquiFM [46] | QM9 | 99.4±0.0 | 93.2±0.3 | 94.4±0.2 | 0.08±0.00 | 25±3 |
| FlowMol (ours) | QM9 | 99.7±0.0 | 96.2±0.1 | 97.3±0.1 | 0.08±0.00 | 6±0 |
| JODO [22] | GEOM-Drugs | 99.8±0.0 | 90.7±0.5 | 76.5±0.8 | 0.17±0.01 | 235±16 |
| MiDi [23] | GEOM-Drugs | 99.0±0.2 | 85.1±0.9 | 71.6±0.9 | 0.23±0.00 | 754±119 |
| FlowMol (ours) | GEOM-Drugs | 99.0±0.0 | 67.5±0.2 | 51.2±0.3 | 0.33±0.01 | 22±1 |

FlowMol exhibits substantially faster inference times than all baseline models. This difference is primarily due to the smaller number of integration steps needed by FlowMol: we find empirically that sample quality does not improve when using more than 100 integration steps, whereas JODO, MiDi, and EquiFM use 1000 integration steps by default. The need for fewer integration steps is a recognized advantage of flow matching models over diffusion models [17, 19].
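For reference, this style of inference amounts to a plain Euler integration of the learned ODE. The sketch below assumes a `model(g_t, t)` interface that returns the marginal vector field (for endpoint-parameterized models, the predicted endpoint would first be converted to a vector field via (11)); this interface is an assumption, not the released API.

```python
import torch

@torch.no_grad()
def euler_sample(model, g0: torch.Tensor, n_steps: int = 100) -> torch.Tensor:
    """Integrate dg/dt = u_theta(g, t) from t=0 to t=1 with n_steps
    evenly-spaced Euler steps, starting from a prior sample g0.
    (FlowMol applies this per modality: positions, types, charges, bonds.)"""
    g = g0
    ts = torch.linspace(0.0, 1.0, n_steps + 1)
    for t0, t1 in zip(ts[:-1], ts[1:]):
        u = model(g, t0)        # vector field at the current state and time
        g = g + (t1 - t0) * u   # explicit Euler update
    return g
```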
6 Discussion FlowMol improves upon the existing state-of-the-art flow matching method for molecule generation; however, it still does not outperform diffusion models trained for the same task. A key difference between FlowMol and the diffusion baselines presented here is that the conditional trajectories are deterministic in FlowMol and stochastic in diffusion models; prior works have presented theoretical [18] and empirical [21] evidence that stochastic conditional trajectories yield improved model performance. Our results raise interesting questions about the design of prior distributions for flow matching models. Our intuition was that a stronger prior that is “closer” to the data distribution would yield more faithful recapitulation of the target distribution; the results of our model ablations suggest this intuition is incorrect. The natural next questions are: why is a Gaussian prior the most performant of those tested here, and what are the qualities of a prior that best enable recapitulation of the target distribution? A possible explanation for our results is a dependence on the “volume” of the prior. Empirically, when the prior for categorical features has support on a small number of unique values, the model fails to produce any valid molecules, and adding a “blur” as described in Section 3.5 dramatically improves model performance. Correspondingly, priors constrained to the simplex reliably yield poorer performance than Gaussian priors. These observations could all be explained through the perspective of the prior's capacity to serve as one domain of a homeomorphism to a more complex distribution. Another explanation for the superiority of Gaussian priors may involve the shape of the conditional trajectories induced by the prior: conditional trajectories are more likely to intersect when constrained to a smaller space, such as the simplex. This explanation is also supported by the observation that the marginal-simplex and barycenter priors yield substantially improved performance over uniform-simplex priors. Tong et al. [17] suggest that sampling conditional pairs $(g_0, g_1)$ from an optimal transport (OT) alignment $\pi(g_0, g_1)$ improves performance precisely because the marginal vector field then yields straighter trajectories with fewer intersections. In this work, an OT plan is computed, but only for atomic positions; perhaps computing an OT alignment over the product space of all the data modalities represented here could alleviate this issue. 7 Conclusions FlowMol is, to our knowledge, the first flow matching model to jointly sample the topological and geometric structure of small molecules. FlowMol improves upon existing flow matching models for molecule generation and achieves competitive performance with diffusion-based models while exhibiting inference speeds an order of magnitude faster. We present a method for flow matching on categorical variables, SimplexFlow, and demonstrate that constraining flows to a smaller space does not yield performance benefits. We think this result raises interesting and relevant questions about the design of flow matching for mixed continuous/categorical generative tasks, and we provide potential hypotheses to begin exploring in future work. 8 Acknowledgements We thank Rishal Aggarwal, Gabriella Gerlach, and Daniel Peñahererra for useful feedback and discussions.
This work is funded through R35GM140753 from the National Institute of General Medical Sciences. The content is solely the responsibility of the authors and does not necessarily represent the official views of the National Institute of General Medical Sciences or the National Institutes of Health.",
"additional_graph_info": {
"graph": [],
"node_feat": {
"Ian Dunn": [
{
"url": "http://arxiv.org/abs/2404.19739v1",
"title": "Mixed Continuous and Categorical Flow Matching for 3D De Novo Molecule Generation",
"abstract": "Deep generative models that produce novel molecular structures have the\npotential to facilitate chemical discovery. Diffusion models currently achieve\nstate of the art performance for 3D molecule generation. In this work, we\nexplore the use of flow matching, a recently proposed generative modeling\nframework that generalizes diffusion models, for the task of de novo molecule\ngeneration. Flow matching provides flexibility in model design; however, the\nframework is predicated on the assumption of continuously-valued data. 3D de\nnovo molecule generation requires jointly sampling continuous and categorical\nvariables such as atom position and atom type. We extend the flow matching\nframework to categorical data by constructing flows that are constrained to\nexist on a continuous representation of categorical data known as the\nprobability simplex. We call this extension SimplexFlow. We explore the use of\nSimplexFlow for de novo molecule generation. However, we find that, in\npractice, a simpler approach that makes no accommodations for the categorical\nnature of the data yields equivalent or superior performance. As a result of\nthese experiments, we present FlowMol, a flow matching model for 3D de novo\ngenerative model that achieves improved performance over prior flow matching\nmethods, and we raise important questions about the design of prior\ndistributions for achieving strong performance in flow matching models. Code\nand trained models for reproducing this work are available at\nhttps://github.com/dunni3/FlowMol",
"authors": "Ian Dunn, David Ryan Koes",
"published": "2024-04-30",
"updated": "2024-04-30",
"primary_cat": "q-bio.BM",
"cats": [
"q-bio.BM",
"cs.LG"
],
"main_content": "Introduction Deep generative models that can directly sample molecular structures with desired properties have the potential to accelerate chemical discovery by reducing or eliminating the need to engage in resource-intensive screening-based based discovery paradigms. Moreover, generative models may also improve chemical discovery by enabling multi-objective design of chemical matter. In pursuit of this idea, there has been recent interest in developing generative models for the design of small-molecule therapeutics [1\u20138], proteins [9\u201311], and materials [12]. State of the art performance in these tasks is presently achieved by applying diffusion models [13\u201315] to point cloud representations of molecular structures. Flow matching, a recently proposed generative modeling framework [16\u201319], generalizes diffusion models. Under diffusion models, the transformation of prior samples to data is formulated as a reversal of a predefined forward process. The forward process is a Markov chain or differential equation that must converge to a tractable stationary distribution as t \u2192\u221e; this requirement constrains the viable options for forward/reverse processes and prior distributions. In contrast, flow matching prescribes a method for directly learning a differential equation that maps samples from nearly arbitrary distributions. In doing so, flow matching permits valuable flexibility when designing models for specific applications. For example, Jing et al. [20] and St\u00e4rk et al. [21] make use of the fact that flow matching allows arbitrary prior distributions to design models whose priors are closer to realistic 3D molecular conformations than a Gaussian prior. In this work we explore the application of flow matching to 3D de novo small molecule generation. We adapt the approach of state of the art diffusion models for this task [22\u201324] to the flow matching framework. This approach entails predicting atom positions, atom types (chemical elements), formal charges, and bond orders between all pairs arXiv:2404.19739v1 [q-bio.BM] 30 Apr 2024 \fFigure 1: Overview of FlowMol Top: We adapt the flow matching framework for unconditional 3D molecule generation. An ordinary differential equation parameterized by a graph neural network transforms a prior distribution over atom positions, types, charges, and bond orders to the distribution of valid molecules. Black arrows show the instantaneous direction of the ODE on atom positions. Middle: Trajectory of the atom type vector for a single atom under SimplexFlow, a variant of flow matching developed for categorical variables. Atom type flows lie on the probability simplex. Bottom: Trajectory of an atom type vector starting from a Gaussian prior. This approach does not respect the categorical nature of the data; however, we find it yields superior performance to SimplexFlow. of atoms. All of these variables are categorical with the exception of atom positions. Therefore, molecule generation requires sampling from a joint distribution of continuous and categorical variables. Effectively adapting flow matching for this mixed continuous/categorical generative task may be non-trivial because the flow matching framework is predicated on the assumption of continuously valued data. In this work, we extend the flow matching framework to categorical data by constructing flows that are constrained to exist on a continuous representation of categorical data known as the probability simplex. 
We call this extension SimplexFlow. We present a model for de novo small-molecule generation that uses SimplexFlow to generate categorical features. This work was motivated by the intuition that designing a generative process that respects the categorical nature of the data it operates on may yield improved performance; however, our empirical results contradict this intuition. We show that, in practice, a simpler approach that makes no accommodations for the categorical nature of the data yields superior performance to a de novo model using SimplexFlow. Our final flow matching model for molecule generation, FlowMol, achieves improved performance over existing flow matching methods for molecule generation and is competitive with state-of-the-art diffusion models while exhibiting a >10-fold reduction in inference time. 2 Background 2.1 Discrete Diffusion The original formulation of diffusion models [13] was defined in terms of a Markov chain of random variables that converges to a tractable stationary distribution in the limit of an infinite number of steps. This formulation made no assumptions about the sample space of the random variables modeled, allowing for natural extensions to discrete data [25-27]. A separate formulation of diffusion models as continuous-time stochastic differential equations (SDEs) [15] became popular in the literature; the SDE formulation depends on the assumption of continuously-valued data. Similar to our approach, there is a line of work developing SDE-based diffusion models that operate on continuous representations of discrete data. Several works developed diffusion models whose diffusion trajectories are constrained to the simplex [28-30]. An alternative approach is to embed categorical features into a continuous latent space and train diffusion models on the embeddings [31]. 2.2 De Novo Molecule Generation Initial attempts at de novo molecule generation focused on generating either textual representations (SMILES strings) [32-34] or 2D molecular graphs [35-38]: molecular representations that exclude all information about 3D structure. Subsequent approaches were developed for 3D molecule generation using a variety of molecular representations and generative paradigms [39-43]. Hoogeboom et al. [44] proposed the first diffusion model for 3D molecule generation, which yielded superior performance over previous approaches. Molecules are represented in Hoogeboom et al. [44] by attributed point clouds where each atom has a position in space and a type. A continuous diffusion process is defined for both atom positions and types, where the prior for both is a standard Gaussian distribution. A purported weakness of this approach is that atom connectivity is not predicted by the model and must be inferred in a post-processing step. Several concurrent works sought to address this issue by predicting bond orders in addition to atom positions/types: Huang et al. [22], Vignac et al. [23], Peng et al. [24], and Hua et al. [45]. These models report substantially improved performance over Hoogeboom et al. [44]. Three of these four concurrent works (Vignac et al. [23], Peng et al. [24], Hua et al. [45]) use discrete diffusion processes for categorical features and attribute (in part) their improved model performance to the use of discrete diffusion; however, only Peng et al. [24] presents an ablation study isolating the effect of discrete diffusion. Moreover, Huang et al.
[22] uses only continuous diffusion processes and reports superior performance. This suggests that while predicting graph connectivity provides performance benefits, the utility of discrete diffusion for molecule generation is less clear. Vignac et al. [23] and Huang et al. [22] fully specify the molecular structure by also predicting atom formal charges and the presence of hydrogen atoms; for this reason, these works are the most similar to the model presented here. 2.3 Flow-Matching for De Novo Molecule Generation To our knowledge, Song et al. [46] is the only existing work that performs de novo molecule generation with flow matching. Molecules are represented as point clouds where each atom has a position in space and an atom type; the bonded molecular structure is inferred in a post-processing step after sampling. The prior distribution for atom type vectors is a standard Gaussian distribution, and so the generative process does not have any inductive biases to respect the discrete nature of the data. This work can be viewed as the flow matching analog of Hoogeboom et al. [44]. 2.4 Flow Matching for Discrete Data Concurrent work [47] developed a variant of flow matching on the simplex which we refer to as Dirichlet Flows. In Dirichlet Flows, conditional probability paths are only conditioned on $x_1$ and, as a result, do not permit arbitrary choices of the prior; they must use a uniform distribution over the simplex. In contrast, our formulation permits the use of any prior distribution. Stark et al. [47] identify problems with the commonly used conditional vector fields that limit performance on variables with a large number of categories, and they propose an alternative choice of conditional probability paths that alleviates this issue. There are also other works which develop flow matching variants for discrete data. Boll et al. [48] equip the simplex with the Fisher-Rao metric to form a Riemannian manifold, and apply Riemannian Flow Matching [49] to this manifold. Campbell et al. [50] develop a flow matching method for discrete data built on continuous-time Markov chains. Importantly, none of the aforementioned works, which present methods for training flow matching models on categorical data, benchmark their model performance against simpler flow matching models that do not account for the categorical nature of the data. 2.5 Flow Matching Flow matching [16-19] is a new generative modeling framework that generalizes diffusion models. Flow matching permits useful design flexibility in the choice of prior and the nature of the map between two distributions. Flow matching is also conceptually simpler than diffusion and permits substantially faster inference. We briefly describe the flow matching framework here. An ordinary differential equation (ODE) on $\mathbb{R}^d$ is defined by a smooth, time-dependent vector field $u(x, t) : \mathbb{R}^d \times [0, 1] \to \mathbb{R}^d$: $\frac{dx}{dt} = u(x, t)$ (1). Note that we only consider this ODE on the time interval [0, 1]. For simplicity we will use $u_t(x)$ interchangeably with $u(x, t)$. Given a probability distribution over initial positions $x_0 \sim p_0(x)$, the ODE (1) induces time-dependent probability distributions $p_t(x)$. The objective in flow matching is to approximate a vector field $u_t(x)$ that pushes a source distribution $p_0(x)$ to a desired target distribution $p_1(x)$. A neural network $u_\theta$ can be regressed onto the vector field $u_t$ by minimizing the Flow Matching loss.
$L_{FM} = E_{x \sim p_t}\|u_\theta(x, t) - u_t(x)\|^2$ (2). Computing $L_{FM}$ requires access to $u_t$ and $p_t$, quantities that are typically intractable. Flow matching provides a method for approximating $u_t(x)$ without having access to it. If we consider the probability path $p_t(x)$ to be a mixture of conditional probability paths $p_t(x|z)$, $p_t(x) = \int p_t(x|z)p(z)dz$ (3), and we know the form of the conditional vector fields $u_t(x|z)$ that produce $p_t(x|z)$, then the marginal vector field $u_t(x)$ can be defined as a mixture of conditional vector fields: $u_t(x) = E_{p(z)}\left[\frac{u_t(x|z)\,p_t(x|z)}{p_t(x)}\right]$ (4). We still cannot compute $u_t(x)$, but the neural network $u_\theta$ that minimizes $L_{FM}$ is also the minimizer of the Conditional Flow Matching (CFM) loss defined in (5): $L_{CFM} = E_{p(z),\, p_t(x|z),\, t \sim U(0,1)}\|u_\theta(x, t) - u_t(x|z)\|^2$ (5). That is, regressing onto conditional vector fields is, in expectation, equivalent to regressing onto the marginal vector field. The remaining design choices for a flow matching model are the choice of conditioning variable $z$, conditional probability paths $p_t(x|z)$, and conditional vector fields $u_t(x|z)$. 3 Methods 3.1 Problem Setting We represent a molecule with $N$ atoms as a fully-connected graph. Each atom is a node in the graph. Every atom has a position in space $X = \{x_i\}_{i=1}^N \in \mathbb{R}^{N \times 3}$, an atom type (in this case the atomic element) $A = \{a_i\}_{i=1}^N \in \mathbb{R}^{N \times n_a}$, and a formal charge $C = \{c_i\}_{i=1}^N \in \mathbb{R}^{N \times n_c}$. Additionally, every pair of atoms has a bond order $E = \{e_{ij}\ \forall i, j \in [N] \mid i \neq j\} \in \mathbb{R}^{(N^2 - N) \times n_e}$, where $n_a, n_c, n_e$ are the numbers of possible atom types, charges, and bond orders; these are categorical variables represented by one-hot vectors. For brevity, we denote a molecule by the symbol $g$, which can be thought of as a tuple of the constituent data types $g = (X, A, C, E)$. There is no closed-form expression or analytical technique for sampling the distribution of realistic molecules $p(g)$; we seek to train a flow matching model to sample this distribution. Concretely, we choose the target distribution to be the distribution of valid 3D molecules, $p_1(g) = p(g)$. Our choice of prior $p_0(g)$ is described in Section 3.5. Our strategy for adapting flow matching to molecular structure mimics prior work on applying diffusion and flow-based generative models to molecular structure: we define conditional vector fields and conditional probability paths for each data modality and jointly regress one neural network for all data modalities. Our total loss is a weighted combination of CFM losses from (5): $L = \eta_X L_X + \eta_A L_A + \eta_C L_C + \eta_E L_E$ (6), where $(\eta_X, \eta_A, \eta_C, \eta_E)$ are scalars weighting the relative contribution of each loss term. We set these values to (3, 0.4, 1, 2), as was done in Vignac et al. [23]. Our specific choice of conditional vector fields and probability paths is described in Section 3.2. In practice, we use a variant of the CFM objective called the endpoint-parameterized objective, which we present in Section 3.3. These choices are used in turn to design SimplexFlow, our method for performing flow matching on categorical variables, described in Section 3.4.
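As a concrete illustration of the CFM objective (5) just described, here is a minimal training-loss sketch for a single modality under the simple linear interpolant $x_t = (1-t)x_0 + t x_1$, whose conditional vector field is $x_1 - x_0$ (the temporally non-linear interpolants of Section 3.2 below generalize this with $\alpha_t$). The `u_theta(x, t)` interface is an assumption of the sketch.

```python
import torch

def cfm_loss(u_theta, x0: torch.Tensor, x1: torch.Tensor) -> torch.Tensor:
    """One Monte-Carlo estimate of the CFM loss (5) with z = (x0, x1)."""
    # Broadcastable t ~ U(0, 1), one time per sample in the batch.
    t = torch.rand(x0.shape[0], *([1] * (x0.dim() - 1)))
    xt = (1.0 - t) * x0 + t * x1   # sample from the conditional path p_t(x | x0, x1)
    target = x1 - x0               # conditional vector field u(x_t | x0, x1)
    return ((u_theta(xt, t) - target) ** 2).mean()
```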
3.2 Flow Matching with Temporally Non-Linear Interpolants We choose the conditioning variable to be the initial and final states of a trajectory: $z = (g_0, g_1)$. We choose the conditional probability path to be a Dirac density placed on a “straight” line connecting these states: $p_t(g|g_0, g_1) = \delta(g - (1 - \alpha_t)g_0 - \alpha_t g_1)$. This particular choice of conditional vector fields and probability paths gives us the freedom to choose any prior distribution $p_0(g)$ [17, 18, 36]. Our choice of $p_t(g|g_0, g_1)$ is equivalent to defining a deterministic interpolant: $g_t = (1 - \alpha_t)g_0 + \alpha_t g_1$ (7), where $\alpha_t : [0, 1] \to [0, 1]$ is a function that takes $t$ as input and returns a value between 0 and 1. The rate at which a molecule from the prior distribution $g_0$ is transformed into a valid molecule $g_1$ is controlled by the choice of $\alpha_t$, which we name the “interpolant schedule” (intended to be analogous to the noise schedules of diffusion models). We define a separate interpolant schedule for each data type comprising a molecule: $\alpha_t = (\alpha_t^X, \alpha_t^A, \alpha_t^C, \alpha_t^E)$. Taking inspiration from Vignac et al. [23], we define a cosine interpolant schedule: $\alpha_t = 1 - \cos^2\left(\frac{\pi}{2} t^{\nu}\right)$ (8), where a different value of $\nu$ is set for atom positions, types, charges, and bond orders. The interpolant (7) gives rise to conditional vector fields of the form $u(g_t|g_0, g_1) = \alpha'_t (g_1 - g_0)$ (9), where $\alpha'_t$ is the time derivative of $\alpha_t$. 3.3 Endpoint Parameterization By solving (7) for $g_0$ and substituting this expression into (9), we obtain an alternate form of the conditional vector field: $u(g_t|g_0, g_1) = \frac{\alpha'_t}{1 - \alpha_t}(g_1 - g_t)$ (10). As described in Section 2.5, the typical flow matching procedure is to regress a neural network $u_\theta(g_t)$ directly onto conditional vector fields by minimizing the CFM loss (5). Instead, we apply a reparameterization initially proposed by Jing et al. [20]: $u_\theta(g_t) = \frac{\alpha'_t}{1 - \alpha_t}(\hat{g}_1(g_t) - g_t)$ (11). By substituting (11) and (10) into (5), we obtain our endpoint-parameterized objective: $L_{EP} = E_{t, g_t}\left[\frac{\alpha'_t}{1 - \alpha_t}\,\|\hat{g}_1(g_t) - g_1\|\right]$ (12). Our objective therefore becomes to train a neural network $\hat{g}_1(g_t)$ that predicts valid molecular structures given samples from a conditional probability path. This is particularly advantageous when operating on categorical data, as placing a softmax layer on the model outputs constrains them to the simplex. Empirically, we find that the endpoint objective yields better performance than the vector-field regression objective (5) for the task of molecule generation. Moreover, we leverage the guarantee that our predicted endpoints for categorical data lie on the simplex to ensure, by construction, that our flows lie on the simplex. In practice, the interpolant-dependent loss weight $\frac{\alpha'_t}{1 - \alpha_t}$ produces unreasonably large values as $\alpha_t \to 1$. We replace this term with a clamped time-dependent loss weight inspired by Le et al. [51]: $w(t) = \min(\max(0.005, \frac{\alpha_t}{1 - \alpha_t}), 1.5)$. For categorical variables we use a cross-entropy loss rather than the L2 norm shown in (12).
3.4 SimplexFlow To design flow matching for categorical data, our strategy is to define a continuous representation of categorical variables and then construct a flow matching model whose flows are constrained to this representation. We choose the d-dimensional probability simplex $S_d$ as the continuous representation of a d-categorical variable: $S_d = \{x \in \mathbb{R}^d \mid x_i \geq 0, \mathbf{1} \cdot x = 1\}$ (13). A d-categorical variable $x_1 \in \{1, 2, \ldots, d\}$ can be converted to a point on $S_d$ via one-hot encoding. Correspondingly, the categorical distribution $p_1(x) = C(q)$ can be converted to a distribution on $S_d$ as $p_1(x) = \sum_{i=1}^{d} q_i \delta(x - e_i)$ (14), where $e_i$ is the $i$th vertex of the simplex and $q_i$ is the probability of $x$ belonging to the $i$th category. If we choose a prior distribution $p_0(x)$ such that $\mathrm{supp}(p_0) = S_d$, then all conditional probability paths produced by the interpolant (7) will lie on the simplex. This is because the simplex is closed under linear interpolation (see Appendix A) and the conditional trajectories are obtained by linearly interpolating between two points on the simplex ($x_0, x_1 \in S_d$). Although choosing $(p_0, p_1)$ with support on the simplex results in conditional trajectories on the simplex, training a flow under the vector-field objective (5) provides no guarantee that trajectories produced by the learned vector field lie on the simplex. However, training a flow matching model under the endpoint parameterization (Section 3.3) enables us to guarantee by construction that generated flows lie on the simplex; a proof is provided in Appendix B. 3.5 Priors We define the prior distribution for a molecule as a composition of independent samples for each atom and each pair of atoms. Our prior distributions take the form: $p_0(g) = p_0(x, a, c, e) = \prod_{i=1}^{N_{atoms}} p_0(x_i) p_0(a_i) p_0(c_i) \times \prod_{i, j < i} p_0(e_{ij})$ (15). Our choice of conditional trajectory (7) permits the choice of any prior distribution; SimplexFlow adds the constraint that the prior distributions for categorical variables have support bounded to the simplex. We always set $p_0(x_i) = N(x_i|0, I)$; that is, atom positions are independently sampled from a standard Gaussian distribution. We explore several prior distributions for the categorical variables $a_i, c_i, e_{ij}$, experimenting with three different categorical priors for SimplexFlow. The uniform-simplex prior is a uniform distribution over the simplex, the simplest choice for a categorical prior; this choice is analogous to the “Linear FM” model described in [47]. The marginal-simplex prior is designed to be “closer” to the data distribution by using marginal distributions observed in the training data. Specifically, we replace $p_0(a_i)p_0(c_i)$ and $p_0(e_{ij})$ in (15) with $p_1(a_i, c_i)$ and $p_1(e_{ij})$, respectively. Finally, for the barycenter prior, categorical variables are placed at the barycenter of the simplex: the point at the center of the simplex assigning equal probability to all categories. The intuition behind the barycenter prior is that all categorical variables will be “undecided” at $t = 0$. In practice, the model fails when the prior distributions for categorical variables only have density on a small, fixed number of points on the simplex, as is the case for the marginal-simplex and barycenter priors. We find that “blurring” the prior samples for categorical variables significantly improves performance; that is, Gaussian noise is added to the samples before they are projected back onto the simplex.
3.6 Optimal Transport Alignment Previous work [17] has shown that aligning prior and target samples via optimal transport significantly improves the performance of flow matching by minimizing the extent to which conditional trajectories intersect. When performing flow matching on molecular structure, this consists of computing the optimal permutation of the node ordering and the rigid-body alignment of atom positions [46, 52]. We apply this alignment between target and prior positions at training time. This also ensures that prior positions $p_0(X)$ and target positions $p_1(X)$ effectively exist in the center-of-mass-free subspace proposed in Hoogeboom et al. [44], which renders the target density $p_1(g)$ invariant to translations. Figure 2: FlowMol Architecture. Top left: An input molecular graph $g_t$ is transformed into a predicted final molecular graph $g_1$ by being passed through multiple molecule update blocks. Top right: A molecule update block uses NFU, NPU, and EFU sub-components to update all molecular features. Bottom: Update equations for graph features; $\phi$ and $\psi$ are used to denote MLPs and GVPs, respectively. 3.7 Model Architecture Molecules are treated as fully-connected graphs. The model is designed to accept a sample $g_t$ and predict the final destination molecule $g_1$. Within the neural network, molecular features are grouped into node positions, node scalar features, node vector features, and edge features. Node positions are identical to the atom positions discussed in Section 3.1. Node scalar features are a concatenation of atom type and atom charge. Node vector features are geometric vectors (vectors with rotation order 1) that are relative to the node position; they are initialized to zero vectors. Molecular features are iteratively updated by passing $g_t$ through several Molecule Update Blocks. A Molecule Update Block uses Geometric Vector Perceptrons (GVPs) [53] to handle vector features, and is composed of three components: a node feature update (NFU), a node position update (NPU), and an edge feature update (EFU). The NFU uses a message-passing graph convolution to update node features; the NPU and EFU blocks are node-wise and edge-wise operations, respectively. Following several molecule update blocks, predictions of the final categorical features ($\hat{A}_1, \hat{C}_1, \hat{E}_1$) are generated by passing node and edge features through shallow node-wise and edge-wise multilayer perceptrons (MLPs). For models using the endpoint parameterization, these MLPs include softmax activations. The model architecture is visualized in Figure 2 and explained in detail in Appendix D. In practice, graphs are directed: for every pair of atoms $i, j$ there exist edges in both directions, $i \to j$ and $j \to i$. When predicting the final bond orders $\hat{E}_1$ for an edge, we ensure that one prediction is made per pair of atoms and that this prediction is invariant to permutations of the atom indexing. This is accomplished by making our prediction from the sum of the learned bond features; that is, $\hat{e}^{ij}_1 = \mathrm{MLP}(e_{ij} + e_{ji})$. GVPs, as originally designed, predict vector quantities that are E(3)-equivariant. We introduce a variant of GVP that is made SE(3)-equivariant by the addition of cross product operations. The cross product is equivariant to rotations and translations of its input vectors but not to reflections. As a result, the learned density $p_1(g)$ is invariant to rotations and translations but not reflections; in other words, FlowMol is sensitive to chirality. Empirically, we find that the addition of cross product operations to GVP improves performance. Schneuing et al. [3] proposed the addition of a cross product operation to the EGNN architecture [54]; we adopt this idea for GVP.
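A hedged sketch of the training-time alignment from Section 3.6 above: prior atoms are matched to target atoms with the Hungarian algorithm and then rigidly rotated onto them with the Kabsch algorithm. Treating the permutation and rotation as two independent steps (rather than solving them jointly or alternating) is a simplification of this sketch, not necessarily the authors' procedure.

```python
import numpy as np
from scipy.optimize import linear_sum_assignment

def align_prior_to_target(x0: np.ndarray, x1: np.ndarray):
    """Align prior positions x0 (N, 3) to target positions x1 (N, 3)."""
    # Work in the center-of-mass-free subspace.
    x0 = x0 - x0.mean(axis=0)
    x1 = x1 - x1.mean(axis=0)
    # Optimal permutation of prior atoms onto target atoms (Hungarian).
    cost = ((x0[:, None, :] - x1[None, :, :]) ** 2).sum(-1)  # (N, N) squared distances
    row, col = linear_sum_assignment(cost)
    perm = np.empty_like(row)
    perm[col] = row          # perm[j] = index of the prior atom matched to target j
    x0 = x0[perm]
    # Kabsch: optimal proper rotation of x0 onto x1.
    H = x0.T @ x1
    U, _, Vt = np.linalg.svd(H)
    d = np.sign(np.linalg.det(Vt.T @ U.T))
    D = np.diag([1.0, 1.0, d])  # correct a possible reflection
    R = Vt.T @ D @ U.T
    return x0 @ R.T, x1
```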
We refer the reader to Appendix F of Schneuing et al. [3] for a detailed discussion of the equivariance of cross products. Our cross product variant of GVP is described in Appendix D.1. 4 Experiments 4.1 Datasets We train on QM9 [55, 56] and GEOM-Drugs [57] using explicit hydrogens. QM9 contains 124k small molecules, each with one 3D conformation. GEOM-Drugs contains approximately 300k larger, drug-like molecules with multiple conformers per molecule. Molecules in QM9 have an average of 18 atoms and a maximum of 29, while those in GEOM-Drugs have an average of 44 atoms and a maximum of 181. We use the same dataset splits as Vignac et al. [23]. We chose to use explicit hydrogens because it makes for a more difficult learning task. By predicting explicit hydrogens in combination with atom types, bond orders, and formal charges, there is a 1-to-1 mapping from model outputs to molecules. If any one of these components were removed from the generative task, one model output could plausibly be interpreted as multiple molecular structures, and so it would be “easier” for a model output to be deemed “correct” or “valid.” We view the task of predicting graph topology and structure with explicit hydrogens and formal charges as the most rigorous evaluation of the capability of generative models to fit the distribution of valid molecular structures. 4.2 Model Evaluation We report three metrics measuring the validity of generated molecular topology: percent atoms stable, percent molecules stable, and percent molecules valid. An atom is defined as “stable” if it has a valid valency. Atomic valency is defined as the sum of the orders of the bonds in which an atom participates; aromatic bonds are assigned a bond order of 1.5. A valid valency is any valency observed in the training data for atoms of a given element and formal charge. A molecule is counted as stable if all of its constituent atoms are stable. A molecule is considered “valid” if it can be sanitized by rdkit [58] using default sanitization settings. Metrics regarding the validity of molecular topology fail to capture a model's ability to reproduce reasonable molecular geometries. Therefore, we also compute the Jensen-Shannon divergence between the distribution of potential energies of molecules in the training data and that of molecules sampled from trained models. Potential energies are obtained from the Merck Molecular Mechanics Force Field implemented in rdkit [58]. Force-field energies cannot be obtained for molecules that cannot be sanitized by rdkit, and so the reported Jensen-Shannon divergences are for valid molecules only. Molecule quality metrics are reported for samples of 10,000 molecules, repeated 5 times. We report inference time for FlowMol and the baseline models, measured as the time required to generate one batch of 100 molecules on the same NVIDIA GeForce RTX 2060 GPU; this inference procedure is also repeated five times. Inference is run on FlowMol using Euler integration with 100 evenly-spaced timesteps. All results are reported with 95% confidence intervals. For all samplings, the number of atoms in each molecule is sampled from the distribution of atom counts observed in the training data.
4.3 Model Ablations We train multiple versions of our model to evaluate the effects of several of the aforementioned design choices. To observe the effect of the endpoint reparameterization (Section 3.3), we train equivalent models with both the vector-field objective (5) and the endpoint objective (12). We train models using SimplexFlow with all three categorical priors proposed in Section 3.5, which have support on the simplex. To determine whether SimplexFlow improves performance, we also train models where the prior distribution for categorical features is a standard Gaussian distribution. In this setting, the generated flows are not constrained to the simplex, and it can be said that the flows do not “respect” the categorical nature of the data; this is similar to the atom type flows in Song et al. [46] and the atom type diffusion in Hoogeboom et al. [44]. All of the mentioned model ablations are tested on the QM9 dataset and the results are presented in Section 5.1. A subset of these ablations was also performed on the GEOM dataset; GEOM ablations are available in Appendix E, and none of the effects observed there contradict those seen in the QM9 ablations. For metrics reported in ablations, results are averaged over two identical models trained with different random seeds. 4.4 Comparison to Dirichlet Flows We compare SimplexFlow to concurrent work that developed Dirichlet Flows [47] for flow matching on the simplex. Briefly, for a d-categorical variable $x$ represented as a point on the simplex, the conditional probability path is $p_t(x|x_1 = e_i) = \mathrm{Dir}(x \mid \gamma = 1 + e_i \omega)$ (16), where Dir is a Dirichlet distribution parameterized by $\gamma$ and $\omega$ represents time. The Dirichlet conditional flow must start at $\omega = 1$ and only converges to $\delta(x - e_i)$ in the limit $\omega \to \infty$. In order to incorporate Dirichlet flows into our model, we define the relation $\omega_t = \omega_{max}\alpha_t + 1$, where $\alpha_t$ is defined by (8). Dirichlet flow matching necessitates the use of a uniform prior over the simplex for categorical variables, and so we do not experiment with the other simplex priors described in Section 3.5.

Table 1: FlowMol ablations on QM9 with explicit hydrogens

| Flow Type | Categorical Prior | Atoms Stable (%) (↑) | Mols Stable (%) (↑) | Mols Valid (%) (↑) | JS(E) (↓) |
| --- | --- | --- | --- | --- | --- |
| Dirichlet | uniform-simplex | 98.4±0.0 | 80.0±0.3 | 85.5±0.3 | 0.15±0.01 |
| endpoint | uniform-simplex | 98.9±0.1 | 84.2±0.9 | 88.9±0.6 | 0.11±0.01 |
| endpoint | marginal-simplex | 99.5±0.1 | 91.9±0.7 | 96.1±0.2 | 0.06±0.00 |
| endpoint | barycenter | 99.5±0.0 | 91.4±0.5 | 93.6±0.5 | 0.05±0.00 |
| endpoint | Gaussian | 99.7±0.0 | 96.0±0.1 | 96.9±0.1 | 0.09±0.01 |
| vector-field | marginal-simplex | 98.6±0.0 | 79.4±0.3 | 86.2±0.3 | 0.07±0.00 |
| vector-field | Gaussian | 99.5±0.0 | 93.6±0.7 | 94.7±0.7 | 0.08±0.01 |

4.5 Baselines We compare FlowMol to three baselines: MiDi [23], JODO [22], and EquiFM [46]. MiDi and JODO perform the same generation task as FlowMol: predicting atom positions, atom types, formal charges, and bond orders; the key difference is that MiDi and JODO are diffusion models. EquiFM, as described in Section 2.3, is a flow matching model for de novo molecule generation; however, the model does not predict bond orders or atomic charges. We do not report the performance of EquiFM on the GEOM dataset because the authors have not released a model checkpoint.
5 Results 5.1 Model Ablations Results of the model ablation experiments on the QM9 dataset are shown in Table 1. Most notably, models that use SimplexFlow for categorical variables (those with categorical priors constrained to the simplex) consistently underperform models with Gaussian categorical priors. The best-performing SimplexFlow model (endpoint parameterization, marginal-simplex prior) achieves 96.1% valid molecules, while an equivalent model using a Gaussian prior achieves 96.9% valid molecules. Models trained under the endpoint objective achieve superior performance to otherwise identical models trained under the vector-field objective. For example, Table 1 shows that a model trained with a marginal-simplex categorical prior obtains 79% stable molecules under the vector-field objective and 92% stable molecules under the endpoint objective. This effect is also observed with models using a Gaussian categorical prior, but to a lesser extent. We find that models using Dirichlet conditional probability paths [47] yield approximately equivalent performance to the conditional probability path (7) with a uniform-simplex categorical prior. Among models satisfying the constraints of SimplexFlow (Section 3.4), the uniform-simplex prior yielded the worst performance, while the marginal-simplex and barycenter priors yield approximately equivalent performance. Although the models using marginal-simplex and barycenter priors produce relatively fewer valid molecules, the molecules generated by these models exhibit the lowest Jensen-Shannon divergence to the energy distribution of the training data. 5.2 Comparison with Baselines FlowMol achieves superior performance to EquiFM [46] on QM9; for example, it produces 3% more valid molecules while having equivalent divergence to the training data energy distribution. FlowMol approaches the performance of the diffusion baselines (JODO, MiDi) on QM9 but does not perform as well on the GEOM-Drugs dataset. The fact that fewer generated molecules are valid on GEOM-Drugs cannot be attributed solely to the difference in molecule sizes between the two datasets, because FlowMol's atom-level stability is also worse for GEOM-Drugs than for QM9 (99.0% on GEOM vs 99.7% on QM9). Despite the fact that MiDi and FlowMol achieve equivalent atom-level stability on GEOM-Drugs (99.0%), MiDi produces significantly more topologically correct molecules: FlowMol achieves 68% stable molecules while MiDi achieves 85%.

Table 2: Comparison of FlowMol to baseline models on the QM9 and GEOM-Drugs datasets

| Model | Dataset | Atoms Stable (%) (↑) | Mols Stable (%) (↑) | Mols Valid (%) (↑) | JS(E) (↓) | Inference Time (s) (↓) |
| --- | --- | --- | --- | --- | --- | --- |
| JODO [22] | QM9 | 99.9±0.0 | 98.7±0.2 | 98.9±0.2 | 0.12±0.01 | 116±2 |
| MiDi [23] | QM9 | 99.8±0.0 | 97.5±0.1 | 98.0±0.2 | 0.05±0.00 | 89±7 |
| EquiFM [46] | QM9 | 99.4±0.0 | 93.2±0.3 | 94.4±0.2 | 0.08±0.00 | 25±3 |
| FlowMol (ours) | QM9 | 99.7±0.0 | 96.2±0.1 | 97.3±0.1 | 0.08±0.00 | 6±0 |
| JODO [22] | GEOM-Drugs | 99.8±0.0 | 90.7±0.5 | 76.5±0.8 | 0.17±0.01 | 235±16 |
| MiDi [23] | GEOM-Drugs | 99.0±0.2 | 85.1±0.9 | 71.6±0.9 | 0.23±0.00 | 754±119 |
| FlowMol (ours) | GEOM-Drugs | 99.0±0.0 | 67.5±0.2 | 51.2±0.3 | 0.33±0.01 | 22±1 |

FlowMol exhibits substantially faster inference times than all baseline models. This difference is primarily due to the smaller number of integration steps needed by FlowMol: we find empirically that sample quality does not improve when using more than 100 integration steps, whereas JODO, MiDi, and EquiFM use 1000 integration steps by default. The need for fewer integration steps is a recognized advantage of flow matching models over diffusion models [17, 19].
6 Discussion FlowMol improves upon the existing state-of-the-art flow matching method for molecule generation; however, it still does not outperform diffusion models trained for the same task. A key difference between FlowMol and the diffusion baselines presented here is that the conditional trajectories are deterministic in FlowMol and stochastic in diffusion models; prior works have presented theoretical [18] and empirical [21] evidence that stochastic conditional trajectories yield improved model performance. Our results raise interesting questions about the design of prior distributions for flow matching models. Our intuition was that a stronger prior that is “closer” to the data distribution would yield more faithful recapitulation of the target distribution; the results of our model ablations suggest this intuition is incorrect. The natural next questions are: why is a Gaussian prior the most performant of those tested here, and what are the qualities of a prior that best enable recapitulation of the target distribution? A possible explanation for our results is a dependence on the “volume” of the prior. Empirically, when the prior for categorical features has support on a small number of unique values, the model fails to produce any valid molecules, and adding a “blur” as described in Section 3.5 dramatically improves model performance. Correspondingly, priors constrained to the simplex reliably yield poorer performance than Gaussian priors. These observations could all be explained through the perspective of the prior's capacity to serve as one domain of a homeomorphism to a more complex distribution. Another explanation for the superiority of Gaussian priors may involve the shape of the conditional trajectories induced by the prior: conditional trajectories are more likely to intersect when constrained to a smaller space, such as the simplex. This explanation is also supported by the observation that the marginal-simplex and barycenter priors yield substantially improved performance over uniform-simplex priors. Tong et al. [17] suggest that sampling conditional pairs $(g_0, g_1)$ from an optimal transport (OT) alignment $\pi(g_0, g_1)$ improves performance precisely because the marginal vector field then yields straighter trajectories with fewer intersections. In this work, an OT plan is computed, but only for atomic positions; perhaps computing an OT alignment over the product space of all the data modalities represented here could alleviate this issue. 7 Conclusions FlowMol is, to our knowledge, the first flow matching model to jointly sample the topological and geometric structure of small molecules. FlowMol improves upon existing flow matching models for molecule generation and achieves competitive performance with diffusion-based models while exhibiting inference speeds an order of magnitude faster. We present a method for flow matching on categorical variables, SimplexFlow, and demonstrate that constraining flows to a smaller space does not yield performance benefits. We think this result raises interesting and relevant questions about the design of flow matching for mixed continuous/categorical generative tasks, and we provide potential hypotheses to begin exploring in future work. 8 Acknowledgements We thank Rishal Aggarwal, Gabriella Gerlach, and Daniel Peñahererra for useful feedback and discussions.
7 Conclusions

FlowMol is the first generative model to jointly sample the topological and geometric structure of small molecules. FlowMol improves upon existing flow matching models for molecule generation and achieves competitive performance with diffusion-based models while exhibiting inference speeds an order of magnitude faster. We present a method for flow matching on categorical variables, SimplexFlow, and demonstrate that constraining flows to a smaller space does not yield performance benefits. We think this result raises interesting and relevant questions about the design of flow matching for mixed continuous/categorical generative tasks and provide potential hypotheses to begin exploring in future work.

8 Acknowledgements

We thank Rishal Aggarwal, Gabriella Gerlach, and Daniel Peñahererra for useful feedback and discussions. This work is funded through R35GM140753 from the National Institute of General Medical Sciences. The content is solely the responsibility of the authors and does not necessarily represent the official views of the National Institute of General Medical Sciences or the National Institutes of Health."
},
{
"url": "http://arxiv.org/abs/2311.13466v2",
"title": "Accelerating Inference in Molecular Diffusion Models with Latent Representations of Protein Structure",
"abstract": "Diffusion generative models have emerged as a powerful framework for\naddressing problems in structural biology and structure-based drug design.\nThese models operate directly on 3D molecular structures. Due to the\nunfavorable scaling of graph neural networks (GNNs) with graph size as well as\nthe relatively slow inference speeds inherent to diffusion models, many\nexisting molecular diffusion models rely on coarse-grained representations of\nprotein structure to make training and inference feasible. However, such\ncoarse-grained representations discard essential information for modeling\nmolecular interactions and impair the quality of generated structures. In this\nwork, we present a novel GNN-based architecture for learning latent\nrepresentations of molecular structure. When trained end-to-end with a\ndiffusion model for de novo ligand design, our model achieves comparable\nperformance to one with an all-atom protein representation while exhibiting a\n3-fold reduction in inference time.",
"authors": "Ian Dunn, David Ryan Koes",
"published": "2023-11-22",
"updated": "2024-05-08",
"primary_cat": "q-bio.BM",
"cats": [
"q-bio.BM",
"cs.LG"
],
"main_content": "Introduction There has been a surge of interest in leveraging diffusion models to address problems in structurebased drug design. These efforts have yielded promising outcomes, exemplified by successes in de novo ligand design [1], molecular docking [2], fragment linker design [3], and scaffold hopping [4]. These models apply diffusion processes on point cloud representations of protein/ligand complexes and employ geometric Graph Neural Networks (GNNs) to make denoising predictions. However, GNN memory and compute requirements scale unfavorably with graph size, and this scaling issue poses a particular challenge within diffusion models, due to their reliance on multiple forward passes to generate a single sample. Some of these molecular diffusion models use coarse-grained representations of their molecular systems to make training and inference computationally feasible [2, 4]. While [3] and [1] train models with both coarse-grained and all-atom protein representations, their results show superior performance when using all-atom representations at the cost of more expensive/time-consuming training and inference. This is likely because residue-level representations discard precise information regarding the orientation of side chains; information which is critical for modeling binding events [5\u20137]. In developing molecular diffusion models and applying them at scale, researchers must grapple with the trade off between the computational demands and performance afforded by their choice of molecular representation. This work proposes a new choice of molecular representation which simultaneously enjoys the expressiveness of all-atom representations and computational efficiency of coarse-grained representations. In summary, our main contributions are: 1Our code is publicly available https://github.com/dunni3/keypoint-diffusion NeurIPS 2023 Generative AI and Biology Workshop arXiv:2311.13466v2 [q-bio.BM] 8 May 2024 \f1. A novel GNN-based architecture for learning condensed representations of molecular structure, allowing end-to-end training for downstream tasks that operate on these latent geometric representations. 2. A diffusion model for de novo ligand design that achieves a 3-fold increase in inference speed by conditioning ligand generation on a learned representation of protein structure. 2 Background Denoising diffusion probabilistic models Diffusion models [8, 9] define a forward diffusion process consisting of T noising steps that convert samples from a data distribution at step t = 0 to samples from a prior distribution at step t = T by repeated additions of random noise. The forward diffusion process conditioned on an initial data point x0 can be defined by Equation 1. q(xt|x0) = N(xt|\u03b1tx0, \u03c32 t I) (1) Where \u03b1t, \u03c3t \u2208R+ are functions that control the amount of signal retained from and noise added to x0, respectively. In this work, \u03b1t is a function that smoothly transitions from \u03b10 \u22481 to \u03b1T \u22480. We specifically work with variance-preserving diffusion processes for which \u03b1t = p 1 \u2212\u03c32 t . Equation 1 can be equivalently written as: xt = \u03b1tx0 + \u03c3t\u03f5 \u03f5 \u223cN(0, I) (2) A neural network that is trained to predict \u03f5 from noisy data points xt can be used to parameterize a reverse diffusion process p\u03b8(xt\u22121|xt) that converts samples from the prior distribution to samples from the training data distribution. 
We refer to this neural network as the "noise prediction network" $\hat{\epsilon}_\theta(x_t, t)$.

Equivariant diffusion on molecules. Hoogeboom et al. [10] propose Equivariant Diffusion Models (EDMs) for generating 3D molecules. In this setting, a molecule with N atoms is treated as a point cloud with positions $x \in \mathbb{R}^{N \times 3}$ and features $s \in \mathbb{R}^{N \times f}$, which are one-hot encoded atom types. For notational convenience, point clouds are represented with a single variable $z = [x, s]$. A forward diffusion process similar to that of Equation 1 is defined for both atom positions and features. The noise prediction network $\hat{\epsilon}_\theta(z_t, t)$ outputs an E(3)-equivariant vector $\hat{\epsilon}^{(x)}$ and an E(3)-invariant vector $\hat{\epsilon}^{(s)}$ for each node, representing the noise to be removed from atom positions and features, respectively.

Diffusion for protein-ligand complexes. Schneuing et al. [1] introduce DiffSBDD, a conditional EDM for generating small molecules inside a protein binding pocket. Both the ligand and the protein binding pocket are represented as point clouds, $z^{(L)}$ and $z^{(P)}$ respectively. $z^{(L)}$ is an all-atom point cloud having one node per atom, while $z^{(P)}$ is either an all-atom point cloud or a Cα point cloud containing one node for every residue in the binding pocket, located at the alpha carbon position. Schneuing et al. [1] propose two distinct diffusion processes for pocket-conditioned generation. The first is a conditional diffusion model where the diffusion process is defined only for $z^{(L)}$; the noise prediction network takes as input the noisy ligand $z^{(L)}_t$ while the pocket structure $z^{(P)}$ remains unchanged throughout the denoising process: $\hat{\epsilon}_\theta(z^{(L)}_t, z^{(P)}, t)$. The second method defines a joint diffusion process on both $z^{(L)}$ and $z^{(P)}$: $\hat{\epsilon}_\theta$ is trained to denoise both the ligand and pocket at every timestep, $\hat{\epsilon}_\theta(z^{(L)}_t, z^{(P)}_t, t)$, and an inpainting procedure is used to generate ligands inside a given pocket. In both cases, $z^{(L)}$ and $z^{(P)}$ are passed to $\hat{\epsilon}_\theta$ as a heterogeneous graph where nodes are atoms or residues and edges are created based on Euclidean distance between nodes.
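A minimal sketch of this distance-based graph wiring, with a hypothetical 5 Å cutoff (the papers' actual cutoffs and edge schemes may differ):

```python
import torch

def radius_edges(x_src, x_dst, cutoff=5.0):
    """Directed edges from src nodes to dst nodes within `cutoff` angstroms.

    x_src: (N_src, 3) and x_dst: (N_dst, 3) node coordinates.
    Returns index tensors usable as an edge list in a GNN.
    """
    dists = torch.cdist(x_src, x_dst)                 # pairwise distances
    src, dst = torch.nonzero(dists < cutoff, as_tuple=True)
    return src, dst
```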
3 Method

To train a conditional EDM for pocket-conditioned ligand generation as described in [1], the noise prediction network $\hat{\epsilon}_\theta$ must have access to some representation of the protein binding pocket. Taking inspiration from Ganea et al. [11], we propose to use an encoder $E_\theta(z^{(P)})$ that accepts an all-atom protein point cloud as input and returns a small, fixed-size point cloud $z^{(KP)}$, which we term the "keypoint representation".

[Figure 1: Message passing is performed between receptor nodes. Learned receptor embeddings are used to place keypoints inside the binding pocket. Keypoints extract local features of the binding pocket. Keypoints are then used to condition the ligand generation process.]

The receptor encoder and diffusion model can be trained end-to-end by minimizing the denoising loss function in Equation 3:

$$\mathcal{L}_{DSM} = \mathbb{E}_{t \sim U(0,T),\, z^{(L)}_0,\, z^{(P)}} \left[ \left\| \epsilon - \hat{\epsilon}_\theta\!\left( z^{(L)}_t, E_\theta(z^{(P)}), t \right) \right\|^2 \right] \quad (3)$$

DiffSBDD [1] and EDMs [10] parameterize $\hat{\epsilon}_\theta$ using the geometric GNN architecture known as EGNN [12]. Within the EGNN architecture, nodes possess a single vector feature that, in practice, is designated as the node's position in space. As a result, there is no point in the EGNN architecture where a node retains geometric information describing its local environment. We intuit that EGNN-based architectures may exhibit poor performance on structure representations where a node cannot be adequately described by a single point mass, i.e., residue or fragment point clouds. Specifically, we hypothesize that EGNN may struggle to learn representations of protein structure that are both informative and condensed. To investigate this phenomenon, we train all models with both EGNN and Geometric Vector Perceptron (GVP) [13, 14] based architectures. GVP-GNN can be seen as a generalization of EGNN to the setting where nodes can have an arbitrary number of vector features [15].

Pocket encoder module. The pocket encoder module is designed to take an all-atom point cloud of the protein binding pocket $z^{(P)}$ as input and produce a point cloud $z^{(KP)} = E_\theta(z^{(P)})$ having K nodes as output. K is a hyperparameter of the model, chosen to be significantly smaller than the number of atoms in a binding pocket; in our training dataset, binding pockets have on the order of hundreds of nodes. We present results for models with K = 40, which is close to the average number of residues in a binding pocket. The nodes of $z^{(KP)}$, referred to as keypoints, have positions in space $x_i \in \mathbb{R}^3$ as well as scalar features $s_i \in \mathbb{R}^d$. When the pocket encoder module is parameterized with GVP-GNN, each keypoint is also endowed with vector features $v_i \in \mathbb{R}^{c \times 3}$. The sequence of operations within the pocket encoder module is summarized in Figure 1. First, message passing is performed along edges between binding pocket atoms. Keypoint nodes are then added to the graph without positions or features. Edges are drawn from receptor nodes to keypoint nodes to form a unidirectional complete bipartite graph. Keypoint positions are obtained via a dot-product variant of graph attention [16] along pocket-keypoint edges. Following keypoint position assignment is a "graph rewiring" step that selectively removes the aforementioned pocket-keypoint edges such that keypoints only have incoming edges from the nearest pocket atoms. Finally, message passing along these local pocket-keypoint edges endows keypoint nodes with spatially localized features. Additional architectural details, including equations for graph convolutions and keypoint placement, are provided in Appendix B.

Optimal transport loss. We find that enforcing spatial alignment between keypoint positions and the true protein/ligand interface is a useful inductive bias. For each protein/ligand pair in the training set we compute a set of interface points $x^{(IP)} \in \mathbb{R}^{S \times 3}$, defined as the median points between all pairs of ligand atoms and binding pocket atoms < 5 Å apart. We apply an optimal transport loss function that is minimized when keypoint positions align with the true protein/ligand interface:

$$\mathcal{L}_{OT} = \min_{T \in U(S,K)} \langle T, C \rangle \quad \text{where} \quad C_{s,k} = \left\| x^{(KP)}_k - x^{(IP)}_s \right\|^2 \quad (4)$$

where $U(S, K)$ is the set of transport plans with uniform marginals and $\langle T, C \rangle$ is the Frobenius inner product between the transport map T and the cost matrix C.
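To make Equation 4 concrete, here is a minimal sketch using the Python Optimal Transport (POT) package; variable names are illustrative, and gradient-stopping details are omitted:

```python
import numpy as np
import ot  # Python Optimal Transport (POT) package

def ot_keypoint_loss(x_kp, x_ip):
    """Sketch of the OT loss in Eq. 4 with uniform marginals.

    x_kp: (K, 3) keypoint positions; x_ip: (S, 3) interface points.
    """
    C = ((x_ip[:, None, :] - x_kp[None, :, :]) ** 2).sum(axis=-1)  # (S, K)
    a = np.full(len(x_ip), 1.0 / len(x_ip))   # uniform marginal over S
    b = np.full(len(x_kp), 1.0 / len(x_kp))   # uniform marginal over K
    T = ot.emd(a, b, C)                       # optimal transport plan
    return (T * C).sum()                      # Frobenius inner product <T, C>
```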
The optimal transport plan is solved in the forward pass using the Python optimal transport package [17] and is held fixed during the backwards pass.

4 Results

Experiments. We train all models on the BindingMOAD dataset [18], which contains approximately 40,000 experimentally determined protein/ligand structures from the Protein Data Bank [19]. We train baseline models where the ligand point cloud is connected to the input protein point cloud without the use of any keypoint representation. Baseline models are trained with all-atom and Cα protein representations. We also train keypoint, all-atom, and Cα models with both EGNN and GVP architectures to evaluate the effect of GNN expressivity. We sample 100 ligands from every pocket in the test set. Generated ligands are subjected to a force-field minimization while holding the binding pocket fixed. We measure the RMSD of the ligand pose before and after minimization: if the ligand is in an unreasonable pose or forming unfavorable interactions with the binding pocket, there will be a larger RMSD upon minimization. Additionally, we use the AutoDock Vina scoring function [20] to score the force-field minimized ligands and use the distribution of scores as a proxy for how well ligands are designed for their target pocket.

[Figure 2: Left, Middle: CDFs of ligand RMSD from force-field minimization and Vina score. Right: Sampling time per molecule averaged over the same ten binding pockets for each model.]

Generated Ligand Quality. We evaluate ligand quality by cumulative distribution functions (CDFs) of the RMSD from force-field minimization and of the Vina score, shown in Figure 2, with higher CDF values indicating higher-quality ligands for both metrics. Most notably, the GVP keypoint model's performance is comparable to the all-atom models despite using 10x fewer nodes to represent the binding pocket. Models using Cα binding pocket representations produce ligands of lower quality than those that use all-atom pocket representations; this is consistent with prior works [1, 3]. The EGNN keypoint model produces ligands of quality equivalent to that of the Cα models.

Inference Performance. We sample 100 molecules per pocket for 10 binding pockets and report mean wall-time per binding pocket. Sampling times in Figure 2 show that keypoint models are 3x faster than their corresponding all-atom models. Additional results in Appendix D.2 show that we can trade off inference time and ligand quality by changing the number of keypoints.

5 Conclusions

Our receptor encoder module is capable of learning compressed representations of binding pocket structure, which enables a 3x reduction in inference time while maintaining comparable quality of generated ligands. Our receptor encoder module may serve as a useful tool for scaling inference in molecular diffusion models. Moreover, our work demonstrates that learned structure encoders can provide valuable flexibility to trade off computational demands and model performance. The GVP keypoint model was able to approach all-atom levels of performance, while the EGNN keypoint model failed to exceed the performance of Cα representations. This result supports our hypothesis that EGNN struggles to learn on molecular representations where a single node represents multiple atoms, and may serve as practical guidance for practitioners designing geometric deep learning models for molecular structure.

Funding

This work is funded through R35GM140753 from the National Institute of General Medical Sciences. The content is solely the responsibility of the authors and does not necessarily represent the official views of the National Institute of General Medical Sciences or the National Institutes of Health."
}
]
},
"edge_feat": {}
}
}
title_31K_G/test_title_long_2404.19740v1.json ADDED
The diff for this file is too large to render. See raw diff
title_31K_G/test_title_long_2404.19752v1.json ADDED
The diff for this file is too large to render. See raw diff
title_31K_G/test_title_long_2404.19759v1.json ADDED
The diff for this file is too large to render. See raw diff
title_31K_G/test_title_long_2405.00057v1.json ADDED
The diff for this file is too large to render. See raw diff
title_31K_G/test_title_long_2405.00077v1.json ADDED
The diff for this file is too large to render. See raw diff
title_31K_G/test_title_long_2405.00099v1.json ADDED
The diff for this file is too large to render. See raw diff
title_31K_G/test_title_long_2405.00175v1.json ADDED
The diff for this file is too large to render. See raw diff
title_31K_G/test_title_long_2405.00181v1.json ADDED
The diff for this file is too large to render. See raw diff
title_31K_G/test_title_long_2405.00198v1.json ADDED
The diff for this file is too large to render. See raw diff
title_31K_G/test_title_long_2405.00201v1.json ADDED
The diff for this file is too large to render. See raw diff
title_31K_G/test_title_long_2405.00204v1.json ADDED
The diff for this file is too large to render. See raw diff
title_31K_G/test_title_long_2405.00216v1.json ADDED
The diff for this file is too large to render. See raw diff
title_31K_G/test_title_long_2405.00218v1.json ADDED
The diff for this file is too large to render. See raw diff
title_31K_G/test_title_long_2405.00242v1.json ADDED
The diff for this file is too large to render. See raw diff
title_31K_G/test_title_long_2405.00243v1.json ADDED
The diff for this file is too large to render. See raw diff