| { |
| "paper_id": "2021", |
| "header": { |
| "generated_with": "S2ORC 1.0.0", |
| "date_generated": "2023-01-19T03:13:46.448875Z" |
| }, |
| "title": "Analyzing the Effects of Reasoning Types on Cross-Lingual Transfer Performance", |
| "authors": [ |
| { |
| "first": "Aalok", |
| "middle": [], |
| "last": "Sathe", |
| "suffix": "", |
| "affiliation": {}, |
| "email": "asathe@mit.edu" |
| }, |
| { |
| "first": "Somak", |
| "middle": [], |
| "last": "Aditya", |
| "suffix": "", |
| "affiliation": {}, |
| "email": "" |
| }, |
| { |
| "first": "Monojit", |
| "middle": [], |
| "last": "Choudhury", |
| "suffix": "", |
| "affiliation": {}, |
| "email": "monojitc@microsoft.com" |
| } |
| ], |
| "year": "", |
| "venue": null, |
| "identifiers": {}, |
| "abstract": "Multilingual language models achieve impressive zero-shot accuracies in many languages in complex tasks such as Natural Language Inference (NLI). Examples in NLI (and equivalent complex tasks) often pertain to various types of sub-tasks, requiring different kinds of reasoning. Certain types of reasoning have proven to be more difficult to learn in a monolingual context, and in the crosslingual context, similar observations may shed light on zero-shot transfer efficiency and few-shot sample selection. Hence, to investigate the effects of types of reasoning on transfer performance, we propose a category-annotated multilingual NLI dataset and discuss the challenges to scale monolingual annotations to multiple languages. We statistically observe interesting effects that the confluence of reasoning types and language similarities have on transfer performance.", |
| "pdf_parse": { |
| "paper_id": "2021", |
| "_pdf_hash": "", |
| "abstract": [ |
| { |
| "text": "Multilingual language models achieve impressive zero-shot accuracies in many languages in complex tasks such as Natural Language Inference (NLI). Examples in NLI (and equivalent complex tasks) often pertain to various types of sub-tasks, requiring different kinds of reasoning. Certain types of reasoning have proven to be more difficult to learn in a monolingual context, and in the crosslingual context, similar observations may shed light on zero-shot transfer efficiency and few-shot sample selection. Hence, to investigate the effects of types of reasoning on transfer performance, we propose a category-annotated multilingual NLI dataset and discuss the challenges to scale monolingual annotations to multiple languages. We statistically observe interesting effects that the confluence of reasoning types and language similarities have on transfer performance.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Abstract", |
| "sec_num": null |
| } |
| ], |
| "body_text": [ |
| { |
| "text": "Recent work has shown that masked language models such as XLM (Lample and Conneau, 2019) , mBERT (Devlin et al., 2019) and XLM-RoBERTa (Conneau et al., 2019) can achieve efficient crosslingual transfer, in both zero-shot and few-shot settings. Such results have motivated researchers (K et al., 2020; Artetxe et al., 2020; Conneau et al., 2020) to investigate the underlying influencing factors behind transfer efficiency along different dimensions such as model capacity, language similarity and learning objective. However, such transfer efficiency is measured for natural language understanding (NLU) tasks which often cover a wide range of linguistic phenomena. Natural Language Inference is one such representative task which is known to require different types of reasoning and linguistic phenomena. In the monolingual context, recently, authors in Joshi et al. (2020a) extended previous work (Richardson et al., 2020; Salvatore et al., 2019) and listed a comprehensive list of types of reasoning capabilities required to solve the NLI examples in large public NLI datasets. Authors show that NLI requires a mix of 15 different types of reasoning capabilities (TAXINLI), categorized broadly into LANGUAGE, LOGIC and KNOWL-EDGE. They observe that both BERT (Devlin et al., 2019) and RoBERTa (Liu et al., 2019) perform poorly in reasoning tasks such as causal and coreference, whereas they pick up negation easily; and observe that some categories are intrinsically harder for these models (both Transformers-based and pre-Transformers LSTM-based) to understand. Our central hypothesis of this work is, crosslingual transfer gap and few-shot performance may depend on the type of reasoning required. To this end, we propose multilingual extension of the TAXINLI dataset. Our zero-shot analysis strongly suggests that reasoning types play a critical role in transfer performance. 
In few-shot, some categories (such as Negation), show steady improvement in accuracy along-with increased cross-lingual alignment. For others (syntactic, logic) improvement varies across language. For causal, few-shot models fail to generalize well on a diagnostic gold set.", |
| "cite_spans": [ |
| { |
| "start": 62, |
| "end": 88, |
| "text": "(Lample and Conneau, 2019)", |
| "ref_id": "BIBREF12" |
| }, |
| { |
| "start": 97, |
| "end": 118, |
| "text": "(Devlin et al., 2019)", |
| "ref_id": "BIBREF6" |
| }, |
| { |
| "start": 135, |
| "end": 157, |
| "text": "(Conneau et al., 2019)", |
| "ref_id": "BIBREF12" |
| }, |
| { |
| "start": 284, |
| "end": 300, |
| "text": "(K et al., 2020;", |
| "ref_id": "BIBREF11" |
| }, |
| { |
| "start": 301, |
| "end": 322, |
| "text": "Artetxe et al., 2020;", |
| "ref_id": "BIBREF0" |
| }, |
| { |
| "start": 323, |
| "end": 344, |
| "text": "Conneau et al., 2020)", |
| "ref_id": "BIBREF5" |
| }, |
| { |
| "start": 855, |
| "end": 875, |
| "text": "Joshi et al. (2020a)", |
| "ref_id": "BIBREF9" |
| }, |
| { |
| "start": 899, |
| "end": 924, |
| "text": "(Richardson et al., 2020;", |
| "ref_id": "BIBREF17" |
| }, |
| { |
| "start": 925, |
| "end": 948, |
| "text": "Salvatore et al., 2019)", |
| "ref_id": "BIBREF18" |
| }, |
| { |
| "start": 1262, |
| "end": 1283, |
| "text": "(Devlin et al., 2019)", |
| "ref_id": "BIBREF6" |
| }, |
| { |
| "start": 1296, |
| "end": 1314, |
| "text": "(Liu et al., 2019)", |
| "ref_id": "BIBREF14" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "In monolingual settings, various authors (Nie et al., 2019; Wang et al., 2018; Ribeiro et al., 2020) have proposed a shift from tracking end-to-end task accuracy to explicit categorizations of fundamental capabilities and to track such individual categorywise errors. While some of the proposed categories seem to capture relevant reasoning capabilities, they are often incomplete as they are tuned towards analyzing errors. Joshi et al. (2020a) recently proposed a categorization of the reasoning tasks involved in NLI. It takes inspiration from earlier literature in linguistics (Wittgenstein, 1922) and logic (Sowa, 2010) . According to Wittgenstein (1922) , humans require a combination (or a sequence) of lexical, syntactic, semantic and pragmatic reasoning to gauge meaning from language (or form). The TAXINLI categorization scheme, which defines three broad groups of reasoning LINGUISTIC, LOGICAL and KNOWLEDGE, aligns with our philosophy. Additionally, the proposed categorization is also useful because it retains the categories that are relevant for public NLI datasets, and the granularity is well-defined. Further, the availability of the annotated dataset makes it a perfect choice for the current study. We introduce the categories briefly. For detailed definitions and examples, we refer the readers to Joshi et al. (2020a) . LINGUISTIC represents NLI examples where inference process is internal to the provided text; further divided into three categories lexical, syntactic and factivity. LOGICAL denotes examples where inference may involve processes external to text, and grouped under Connectives, and Deduction. Connectives involve categories such as Negation, boolean, quantifiers, conditionals and comparatives. Lastly, Deduction involves different types of reasoning such as relational, spatial, causal, temporal and coreference. Knowledge indicates examples where external (world) or commonly assumed knowledge (commonsense) is required for inferencing. ", |
| "cite_spans": [ |
| { |
| "start": 41, |
| "end": 59, |
| "text": "(Nie et al., 2019;", |
| "ref_id": "BIBREF15" |
| }, |
| { |
| "start": 60, |
| "end": 78, |
| "text": "Wang et al., 2018;", |
| "ref_id": "BIBREF21" |
| }, |
| { |
| "start": 79, |
| "end": 100, |
| "text": "Ribeiro et al., 2020)", |
| "ref_id": "BIBREF16" |
| }, |
| { |
| "start": 425, |
| "end": 445, |
| "text": "Joshi et al. (2020a)", |
| "ref_id": "BIBREF9" |
| }, |
| { |
| "start": 581, |
| "end": 601, |
| "text": "(Wittgenstein, 1922)", |
| "ref_id": "BIBREF24" |
| }, |
| { |
| "start": 612, |
| "end": 624, |
| "text": "(Sowa, 2010)", |
| "ref_id": "BIBREF20" |
| }, |
| { |
| "start": 640, |
| "end": 659, |
| "text": "Wittgenstein (1922)", |
| "ref_id": "BIBREF24" |
| }, |
| { |
| "start": 1320, |
| "end": 1340, |
| "text": "Joshi et al. (2020a)", |
| "ref_id": "BIBREF9" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "TaxiXNLI: Towards Multilingual Extension of TaxiNLI", |
| "sec_num": "2" |
| }, |
| { |
| "text": "TaxiXNLI (translated) The TAXINLI dataset (Joshi et al., 2020a) consists of 10071 premise, hypothesis (P-H) pairs drawn from the MultiNLI (MNLI) dataset (Williams et al., 2018a) . Each pair is annotated with the types of reasoning (among 15 categories) that are required to make the inference. To account for the asymmetric distribution of exam-ples across taxonomic categories, we use the hierarchical taxonomy provided by Joshi et al. (2020a) to group some of the related categories together 1 . For our crosslingual analysis, we split the English examples equally 2 into train and test set (5k/5k) by balancing across taxonomic categories through iterative stratification (Sechidis et al., 2011) . Inspired by prior work (Gerz et al., 2018; Joshi et al., 2020b) , we choose Spanish, French, Russian, Hindi, Arabic, Vietnamese, Chinese, Swahili, and Urdu; which collectively cover a spectrum of varying resource (high -4, medium -3, low -2) and typological diversity. We use the Bing translator to translate both train and test set into these nine languages. Due to the noise inherent in automatic translation, first we perform a qualitative analysis of translated examples. Following Joshi et al. (2020a), we conduct manual annotations of TAXINLI categories for a subset of translated pairs in three languages: Swahili, Hindi and Spanish, by native speakers. In addition to categories, we instruct them to indicate full, partial, and no noise translations. Among them, there were 8.95% (18.4) and 36.81% (39.8) were fully (and partially) noisy sentence-pairs in Hindi and Swahili, and Spanish had 38% partially noisy sentences. The percentage of examples where a category in English is 1, but the same in a translated language is zero are 44 (hi), 34 (sw) and 32% (es) respectively. 
Upon further analysis and interviews with the annotators, we discover changes (from 1 to 0) are due to 2 reasons: 1) partially noisy translations that make the premise and hypothesis somewhat unrelated; and, the TAXINLI instructions instruct annotators not to proceed with category annotations for un-related neutral examples; 2) difference in reasoning adapted by individual annotators. Our analysis (detailed in next section), aligns with the intuition from the definitions that, translation to different languages does not change the reasoning categories in the LOGICAL and KNOWLEDGE bucket.", |
| "cite_spans": [ |
| { |
| "start": 42, |
| "end": 63, |
| "text": "(Joshi et al., 2020a)", |
| "ref_id": "BIBREF9" |
| }, |
| { |
| "start": 153, |
| "end": 177, |
| "text": "(Williams et al., 2018a)", |
| "ref_id": "BIBREF22" |
| }, |
| { |
| "start": 424, |
| "end": 444, |
| "text": "Joshi et al. (2020a)", |
| "ref_id": "BIBREF9" |
| }, |
| { |
| "start": 675, |
| "end": 698, |
| "text": "(Sechidis et al., 2011)", |
| "ref_id": "BIBREF19" |
| }, |
| { |
| "start": 724, |
| "end": 743, |
| "text": "(Gerz et al., 2018;", |
| "ref_id": "BIBREF7" |
| }, |
| { |
| "start": 744, |
| "end": 764, |
| "text": "Joshi et al., 2020b)", |
| "ref_id": "BIBREF10" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "TaxiXNLI Dataset", |
| "sec_num": "2.1" |
| }, |
| { |
| "text": "TaxiXNLI (diagnostic) Motivated by the above study, we look towards the XNLI dataset (Conneau et al., 2018) , that provides parallel P-H pairs in 15 languages. We sample 1.4k XNLI examples and annotate with a few selected interesting categories (Negation, Boolean, Spatial, Causal, Temporal, Knowledge). This is used as a diagnostic gold The handling and processing of the reagent, commonly limestone, is often done onsite, as is the treatment of the effluent as waste or processing into a saleable product (e.g.", |
| "cite_spans": [ |
| { |
| "start": 85, |
| "end": 107, |
| "text": "(Conneau et al., 2018)", |
| "ref_id": "BIBREF4" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "TaxiXNLI Dataset", |
| "sec_num": "2.1" |
| }, |
| { |
| "text": "The reagent is more often than not limestone, and is handled and processed onsite. test set for our zero-shot and few-shot experiments. We will share the dataset publicly in github.com/ microsoft/TaxiXNLI.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "TaxiXNLI Dataset", |
| "sec_num": "2.1" |
| }, |
| { |
| "text": "\u0905", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "TaxiXNLI Dataset", |
| "sec_num": "2.1" |
| }, |
| { |
| "text": "To gauge the errors due to automated translation of English to different languages and inheriting the reasoning types from TAXINLI annotations, we hire na-tive speakers in Hindi, Spanish and Swahili to reannotate 200 carefully balanced samples. The annotators were trained similarly as described in Joshi et al. (2020a) ; and were additionally instructed to label noisy (partial or full) example pairs. We share the results in Table 1 . Interviews and Further Analysis Our goal was to investigate two questions: 1) how noisy are the translations, 2) for non-noisy translations, do a reasoning category stay the same across languages.", |
| "cite_spans": [ |
| { |
| "start": 299, |
| "end": 319, |
| "text": "Joshi et al. (2020a)", |
| "ref_id": "BIBREF9" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 427, |
| "end": 434, |
| "text": "Table 1", |
| "ref_id": "TABREF1" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Automated Translation Error Analysis", |
| "sec_num": "2.2" |
| }, |
| { |
| "text": "While the noise level in both Hindi and Spanish is lower, translation accuracy for low-resource languages are still pretty poor. This prompted us to create the TaxiXNLI (diagnostic). We wanted to further investigate the second question, as the percentage of #tr=0,en=1 (indicating the categories which was present in English, but was not when translated to the corresponding target language) is lower than expected. After conducting an interview with the Swahili and the Hindi annotators about mismatching annotations, we found the following: 1) partially noisy translations often made the premise and hypothesis somewhat unrelated; and, based on Table 5 : Transfer Gap and TAXINLI categories: For each category, we report the transfer gap averaged across languages and the reduction in the gap when we move from M-BERT to XLM-R.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 647, |
| "end": 654, |
| "text": "Table 5", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Automated Translation Error Analysis", |
| "sec_num": "2.2" |
| }, |
| { |
| "text": "For our experiments, we fine-tune Multilingual BERT and XLM-R (base) 3 on MultiNLI training (Williams et al., 2018b) data and select the best model using the MultiNLI validation set. We repeat the fine-tuning with 5 different seed values and for learning-rate in {2e-5, 3e-5}. For zero-shot experiments, we evaluate the best model on TaxiXNLI datasets for each language and taxonomic category. We measure the crosslingual transferability using transfer gap which is defined as the difference between the performance on English and target language. Whereas, for few-shot learning, for each of the target language and inference type pair, we continue to fine-tune the best MultiNLI fine-tuned XLM-R and M-BERT model using a random few Tax-iXNLI (translated) training examples in that particular pair and then test the model on the same language and inference type pair. We repeat all our experiments with 5 different seeds (different examples each time) and report the average. For our analysis, we measure relative error reduction", |
| "cite_spans": [ |
| { |
| "start": 92, |
| "end": 116, |
| "text": "(Williams et al., 2018b)", |
| "ref_id": "BIBREF23" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Experimental Setup", |
| "sec_num": "3" |
| }, |
| { |
| "text": "(RER = (accuracy N \u2212accuracy 0 ) 100\u2212accuracy 0", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Experimental Setup", |
| "sec_num": "3" |
| }, |
| { |
| "text": ") which provides measure of the error reduction from the initial step (zeroshot), relative to the initial step.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Experimental Setup", |
| "sec_num": "3" |
| }, |
| { |
| "text": "In this section, we discuss some important insights that we observe through the zero-shot and few-shot experiments. We highlight phenomena which are repeated across TaxiXNLI (translated) and Tax-iXNLI (diagnostic) datasets. Figure 1 and 2, we observe that the transferability of different inference types are non-uniform, especially in the case of XLM-R. In particular, transferability of negation (except for Swahili) is a lot better than lexical, syntactic, boolean, or knowledge inference types. The results on the TaxiXNLI (diagnostic) test confirms these observations. Further, from the results on TaxiXNLI (translated), in Table 5 , we observe that the transfer gap for some categories such as negation, boolean can be reduced significantly by increasing the monolingual pre-training data (from M-BERT to XLM- Why does zero-shot transfer of Negation seem easy? And, why is Swahili the odd-one-out? Negation seems easiest to transfer. A possible cause is in most languages, there are standard negation keywords. In English (among 1376 examples, TaxiXNLI-translated), most frequently occurring words are: not (543), never (172), no (260), didn't (73), No (42), nothing (45). It's possible that pretrained LMs learn correspondence between English and target language negation keywords. However, in Swahili, zero-shot transfer for Negation is bad. We observe that Swahili uses fused morphemes to indicate negation; for example, negation marker \"Si\" used for the pronoun \"Mimi\" (I), \"Hatu\" for \"Sisi\" (we). Sentence \" I am not singing\" becomes \"Siimbi\"; \"We are not singing\" becomes \"Hatuiimbi\". It's possible that with the comparatively limited pre-training data, XLM-R does not learn the correspondence between separate negative particles and fused morphemes.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 224, |
| "end": 232, |
| "text": "Figure 1", |
| "ref_id": "FIGREF0" |
| }, |
| { |
| "start": 629, |
| "end": 636, |
| "text": "Table 5", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "CrossLingual Cross-capability Insights", |
| "sec_num": "4" |
| }, |
| { |
| "text": "Language features or Reasoning categories -which affects transfer more? Inspired by Lauscher et al. (2020) , we extend their regression analysis to include category features. Since, multiple categories can exist per example, we create a feature-set for each example in TaxiXNLI (translated) test set. We include Lang2Vec features of its corresponding language (syntax, phonology, inventory, family, and GEO), and one-hot category vector (1 if it belongs to the category otherwise zero). We then use logistic regression (LR) and linear discriminant analysis (LDA) to predict which factors influence XLM-R's correct prediction (0/1) on TaxiXNLI (translated) dataset. As shown in Figure 3 , with high p-values, negation, logic and deductions affect XLM-R's zero-shot performance. Interestingly, for mBERT, the language features correlate more highly with correct prediction.", |
| "cite_spans": [ |
| { |
| "start": 84, |
| "end": 106, |
| "text": "Lauscher et al. (2020)", |
| "ref_id": "BIBREF13" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 677, |
| "end": 685, |
| "text": "Figure 3", |
| "ref_id": "FIGREF2" |
| } |
| ], |
| "eq_spans": [], |
| "section": "CrossLingual Cross-capability Insights", |
| "sec_num": "4" |
| }, |
| { |
| "text": "We plot the relative error reduction in the few-shot scenario (refer Fig. 4, and Fig. 5 ) for XLM-R. We only retain the high-resource languages for both, as translations for high-resource languages are quite accurate. In the TaxiXNLI (translated) set, for boolean, causal, deduction and knowledge, all languages improve uniformly with small variations. However, on TaxiXNLI (diagnostic) test, there is a large decrease in RER for causal and other categories. To investigate deeper, we analyze if few-shot improves crosslingual alignment. For each category, we take ([CLS]) embeddings of the English (pivot) sentence and its translated counterpart in target language from TaxiXNLI (translated) -and compute their pair-wise distances. In Table 6 , We report the difference between average distance after fewshot (highest N) and at zero-shot. Both L 2 distance (and cosine) measure shows that for negation, the crosslingual alignment is increasing after fewshot. Though, results on the diagnostic set calls for careful experiments to test generalization. Table 6 : Difference between L2 distances of sentencepairs for each categories before and after few-shot. For negation, there is large decrease in distances, indicating higher crosslingual alignment.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 69, |
| "end": 87, |
| "text": "Fig. 4, and Fig. 5", |
| "ref_id": "FIGREF3" |
| }, |
| { |
| "start": 736, |
| "end": 743, |
| "text": "Table 6", |
| "ref_id": null |
| }, |
| { |
| "start": 1052, |
| "end": 1059, |
| "text": "Table 6", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Do all categories improve under few-shot?", |
| "sec_num": null |
| }, |
| { |
| "text": "We proposed a multilingual extension of a reasoning type-annotated NLI dataset (TaxiNLI) in English -TaxiXNLI (translated) and a gold test set TaxiXNLI (diagnostic) to analyze transfer efficiency of multilingual language models. Our analysis strongly indicates that reasoning types play an important role in transfer capability. Our few-shot results demonstrate that for types such as Negation, improvement in transfer accuracy co-occurs with improved crosslingual alignment. For causal, few-shot models fail to generalize well on the diagnostic set; and some categories hardly improve. In summary, NLI being central to NLU and NLP, this dataset and our ensuing analysis provides a way to broadly quantify the gaps in zero-shot transfer across various linguistic and logical dimensions.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Findings and Future Work", |
| "sec_num": "5" |
| }, |
| { |
| "text": "Logic: {Quantifier, Conditional, Comparative}; Deductions: {Relational, Spatial, Coreference, Temporal}; Knowledge: {World, Taxonomic} 2 Category-wise splits are in Tab. 4.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| }, |
| { |
| "text": "github.com/huggingface/transformers", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| } |
| ], |
| "back_matter": [], |
| "bib_entries": { |
| "BIBREF0": { |
| "ref_id": "b0", |
| "title": "On the cross-lingual transferability of monolingual representations", |
| "authors": [ |
| { |
| "first": "Mikel", |
| "middle": [], |
| "last": "Artetxe", |
| "suffix": "" |
| }, |
| { |
| "first": "Sebastian", |
| "middle": [], |
| "last": "Ruder", |
| "suffix": "" |
| }, |
| { |
| "first": "Dani", |
| "middle": [], |
| "last": "Yogatama", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "Proceedings of the 58th", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/2020.acl-main.421" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Mikel Artetxe, Sebastian Ruder, and Dani Yogatama. 2020. On the cross-lingual transferability of mono- lingual representations. In Proceedings of the 58th", |
| "links": null |
| }, |
| "BIBREF1": { |
| "ref_id": "b1", |
| "title": "Annual Meeting of the Association for Computational Linguistics", |
| "authors": [], |
| "year": null, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "4623--4637", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Annual Meeting of the Association for Computational Linguistics, pages 4623-4637, Online. Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF3": { |
| "ref_id": "b3", |
| "title": "Unsupervised cross-lingual representation learning at scale", |
| "authors": [ |
| { |
| "first": "Edouard", |
| "middle": [], |
| "last": "Guzm\u00e1n", |
| "suffix": "" |
| }, |
| { |
| "first": "Myle", |
| "middle": [], |
| "last": "Grave", |
| "suffix": "" |
| }, |
| { |
| "first": "Luke", |
| "middle": [], |
| "last": "Ott", |
| "suffix": "" |
| }, |
| { |
| "first": "Veselin", |
| "middle": [], |
| "last": "Zettlemoyer", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Stoyanov", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXiv:1911.02116" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Guzm\u00e1n, Edouard Grave, Myle Ott, Luke Zettle- moyer, and Veselin Stoyanov. 2019. Unsupervised cross-lingual representation learning at scale. arXiv preprint arXiv:1911.02116.", |
| "links": null |
| }, |
| "BIBREF4": { |
| "ref_id": "b4", |
| "title": "Xnli: Evaluating crosslingual sentence representations", |
| "authors": [ |
| { |
| "first": "Alexis", |
| "middle": [], |
| "last": "Conneau", |
| "suffix": "" |
| }, |
| { |
| "first": "Ruty", |
| "middle": [], |
| "last": "Rinott", |
| "suffix": "" |
| }, |
| { |
| "first": "Guillaume", |
| "middle": [], |
| "last": "Lample", |
| "suffix": "" |
| }, |
| { |
| "first": "Adina", |
| "middle": [], |
| "last": "Williams", |
| "suffix": "" |
| }, |
| { |
| "first": "Samuel", |
| "middle": [ |
| "R" |
| ], |
| "last": "Bowman", |
| "suffix": "" |
| }, |
| { |
| "first": "Holger", |
| "middle": [], |
| "last": "Schwenk", |
| "suffix": "" |
| }, |
| { |
| "first": "Veselin", |
| "middle": [], |
| "last": "Stoyanov", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "Proceedings of the 2018 Conference on Empirical Methods in Natural Language Processing", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Alexis Conneau, Ruty Rinott, Guillaume Lample, Ad- ina Williams, Samuel R. Bowman, Holger Schwenk, and Veselin Stoyanov. 2018. Xnli: Evaluating cross- lingual sentence representations. In Proceedings of the 2018 Conference on Empirical Methods in Natural Language Processing. Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF5": { |
| "ref_id": "b5", |
| "title": "Emerging crosslingual structure in pretrained language models", |
| "authors": [ |
| { |
| "first": "Alexis", |
| "middle": [], |
| "last": "Conneau", |
| "suffix": "" |
| }, |
| { |
| "first": "Shijie", |
| "middle": [], |
| "last": "Wu", |
| "suffix": "" |
| }, |
| { |
| "first": "Haoran", |
| "middle": [], |
| "last": "Li", |
| "suffix": "" |
| }, |
| { |
| "first": "Luke", |
| "middle": [], |
| "last": "Zettlemoyer", |
| "suffix": "" |
| }, |
| { |
| "first": "Veselin", |
| "middle": [], |
| "last": "Stoyanov", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics", |
| "volume": "", |
| "issue": "", |
| "pages": "6022--6034", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/2020.acl-main.536" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Alexis Conneau, Shijie Wu, Haoran Li, Luke Zettle- moyer, and Veselin Stoyanov. 2020. Emerging cross- lingual structure in pretrained language models. In Proceedings of the 58th Annual Meeting of the Associ- ation for Computational Linguistics, pages 6022-6034, Online. Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF6": { |
| "ref_id": "b6", |
| "title": "Bert: Pre-training of deep bidirectional transformers for language understanding", |
| "authors": [ |
| { |
| "first": "Jacob", |
| "middle": [], |
| "last": "Devlin", |
| "suffix": "" |
| }, |
| { |
| "first": "Ming-Wei", |
| "middle": [], |
| "last": "Chang", |
| "suffix": "" |
| }, |
| { |
| "first": "Kenton", |
| "middle": [], |
| "last": "Lee", |
| "suffix": "" |
| }, |
| { |
| "first": "Kristina", |
| "middle": [], |
| "last": "Toutanova", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "Proceedings of the 2019 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies", |
| "volume": "1", |
| "issue": "", |
| "pages": "4171--4186", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Jacob Devlin, Ming-Wei Chang, Kenton Lee, and Kristina Toutanova. 2019. Bert: Pre-training of deep bidirectional transformers for language understand- ing. In Proceedings of the 2019 Conference of the North American Chapter of the Association for Com- putational Linguistics: Human Language Technologies, Volume 1 (Long and Short Papers), pages 4171-4186.", |
| "links": null |
| }, |
| "BIBREF7": { |
| "ref_id": "b7", |
| "title": "On the relation between linguistic typology and (limitations of) multilingual language modeling", |
| "authors": [ |
| { |
| "first": "Daniela", |
| "middle": [], |
| "last": "Gerz", |
| "suffix": "" |
| }, |
| { |
| "first": "Ivan", |
| "middle": [], |
| "last": "Vuli\u0107", |
| "suffix": "" |
| }, |
| { |
| "first": "Maria", |
| "middle": [], |
| "last": "Edoardo", |
| "suffix": "" |
| }, |
| { |
| "first": "Roi", |
| "middle": [], |
| "last": "Ponti", |
| "suffix": "" |
| }, |
| { |
| "first": "Anna", |
| "middle": [], |
| "last": "Reichart", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Korhonen", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "Proceedings of the 2018 Conference on Empirical Methods in Natural Language Processing", |
| "volume": "", |
| "issue": "", |
| "pages": "316--327", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Daniela Gerz, Ivan Vuli\u0107, Edoardo Maria Ponti, Roi Reichart, and Anna Korhonen. 2018. On the rela- tion between linguistic typology and (limitations of) multilingual language modeling. In Proceedings of the 2018 Conference on Empirical Methods in Natural Language Processing, pages 316-327.", |
| "links": null |
| }, |
| "BIBREF8": { |
| "ref_id": "b8", |
| "title": "Xtreme: A massively multilingual multi-task benchmark for evaluating cross-lingual generalization. CoRR, abs", |
| "authors": [ |
| { |
| "first": "Junjie", |
| "middle": [], |
| "last": "Hu", |
| "suffix": "" |
| }, |
| { |
| "first": "Sebastian", |
| "middle": [], |
| "last": "Ruder", |
| "suffix": "" |
| }, |
| { |
| "first": "Aditya", |
| "middle": [], |
| "last": "Siddhant", |
| "suffix": "" |
| }, |
| { |
| "first": "Graham", |
| "middle": [], |
| "last": "Neubig", |
| "suffix": "" |
| }, |
| { |
| "first": "Orhan", |
| "middle": [], |
| "last": "Firat", |
| "suffix": "" |
| }, |
| { |
| "first": "Melvin", |
| "middle": [], |
| "last": "Johnson", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Junjie Hu, Sebastian Ruder, Aditya Siddhant, Gra- ham Neubig, Orhan Firat, and Melvin Johnson. 2020. Xtreme: A massively multilingual multi-task benchmark for evaluating cross-lingual generalization. CoRR, abs/2003.11080.", |
| "links": null |
| }, |
| "BIBREF9": { |
| "ref_id": "b9", |
| "title": "Taxinli: Taking a ride up the nlu hill", |
| "authors": [ |
| { |
| "first": "Pratik", |
| "middle": [], |
| "last": "Joshi", |
| "suffix": "" |
| }, |
| { |
| "first": "Somak", |
| "middle": [], |
| "last": "Aditya", |
| "suffix": "" |
| }, |
| { |
| "first": "Aalok", |
| "middle": [], |
| "last": "Sathe", |
| "suffix": "" |
| }, |
| { |
| "first": "Monojit", |
| "middle": [], |
| "last": "Choudhury", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "CoNLL", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Pratik Joshi, Somak Aditya, Aalok Sathe, and Monojit Choudhury. 2020a. Taxinli: Taking a ride up the nlu hill. In CoNLL.", |
| "links": null |
| }, |
| "BIBREF10": { |
| "ref_id": "b10", |
| "title": "The state and fate of linguistic diversity and inclusion in the NLP world", |
| "authors": [ |
| { |
| "first": "Pratik", |
| "middle": [], |
| "last": "Joshi", |
| "suffix": "" |
| }, |
| { |
| "first": "Sebastin", |
| "middle": [], |
| "last": "Santy", |
| "suffix": "" |
| }, |
| { |
| "first": "Amar", |
| "middle": [], |
| "last": "Budhiraja", |
| "suffix": "" |
| }, |
| { |
| "first": "Kalika", |
| "middle": [], |
| "last": "Bali", |
| "suffix": "" |
| }, |
| { |
| "first": "Monojit", |
| "middle": [], |
| "last": "Choudhury", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics", |
| "volume": "", |
| "issue": "", |
| "pages": "6282--6293", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/2020.acl-main.560" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Pratik Joshi, Sebastin Santy, Amar Budhiraja, Kalika Bali, and Monojit Choudhury. 2020b. The state and fate of linguistic diversity and inclusion in the NLP world. In Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics, pages 6282-6293, Online. Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF11": { |
| "ref_id": "b11", |
| "title": "Cross-lingual ability of multilingual BERT: an empirical study", |
| "authors": [ |
| { |
| "first": "K", |
| "middle": [], |
| "last": "Karthikeyan", |
| "suffix": "" |
| }, |
| { |
| "first": "Zihan", |
| "middle": [], |
| "last": "Wang", |
| "suffix": "" |
| }, |
| { |
| "first": "Stephen", |
| "middle": [], |
| "last": "Mayhew", |
| "suffix": "" |
| }, |
| { |
| "first": "Dan", |
| "middle": [], |
| "last": "Roth", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "8th International Conference on Learning Representations", |
| "volume": "2020", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Karthikeyan K, Zihan Wang, Stephen Mayhew, and Dan Roth. 2020. Cross-lingual ability of multilin- gual BERT: an empirical study. In 8th International Conference on Learning Representations, ICLR 2020, Addis Ababa, Ethiopia, April 26-30, 2020. OpenRe- view.net.", |
| "links": null |
| }, |
| "BIBREF12": { |
| "ref_id": "b12", |
| "title": "Crosslingual language model pretraining", |
| "authors": [ |
| { |
| "first": "Guillaume", |
| "middle": [], |
| "last": "Lample", |
| "suffix": "" |
| }, |
| { |
| "first": "Alexis", |
| "middle": [], |
| "last": "Conneau", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "Advances in Neural Information Processing Systems (NeurIPS)", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Guillaume Lample and Alexis Conneau. 2019. Cross- lingual language model pretraining. Advances in Neu- ral Information Processing Systems (NeurIPS).", |
| "links": null |
| }, |
| "BIBREF13": { |
| "ref_id": "b13", |
| "title": "From zero to hero: On the limitations of zero-shot language transfer with multilingual transformers", |
| "authors": [ |
| { |
| "first": "Anne", |
| "middle": [], |
| "last": "Lauscher", |
| "suffix": "" |
| }, |
| { |
| "first": "Vinit", |
| "middle": [], |
| "last": "Ravishankar", |
| "suffix": "" |
| }, |
| { |
| "first": "Ivan", |
| "middle": [], |
| "last": "Vuli\u0107", |
| "suffix": "" |
| }, |
| { |
| "first": "Goran", |
| "middle": [], |
| "last": "Glava\u0161", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing (EMNLP)", |
| "volume": "", |
| "issue": "", |
| "pages": "4483--4499", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Anne Lauscher, Vinit Ravishankar, Ivan Vuli\u0107, and Goran Glava\u0161. 2020. From zero to hero: On the lim- itations of zero-shot language transfer with multilin- gual transformers. In Proceedings of the 2020 Confer- ence on Empirical Methods in Natural Language Pro- cessing (EMNLP), pages 4483-4499.", |
| "links": null |
| }, |
| "BIBREF14": { |
| "ref_id": "b14", |
| "title": "Roberta: A robustly optimized bert pretraining approach", |
| "authors": [ |
| { |
| "first": "Yinhan", |
| "middle": [], |
| "last": "Liu", |
| "suffix": "" |
| }, |
| { |
| "first": "Myle", |
| "middle": [], |
| "last": "Ott", |
| "suffix": "" |
| }, |
| { |
| "first": "Naman", |
| "middle": [], |
| "last": "Goyal", |
| "suffix": "" |
| }, |
| { |
| "first": "Jingfei", |
| "middle": [], |
| "last": "Du", |
| "suffix": "" |
| }, |
| { |
| "first": "Mandar", |
| "middle": [], |
| "last": "Joshi", |
| "suffix": "" |
| }, |
| { |
| "first": "Danqi", |
| "middle": [], |
| "last": "Chen", |
| "suffix": "" |
| }, |
| { |
| "first": "Omer", |
| "middle": [], |
| "last": "Levy", |
| "suffix": "" |
| }, |
| { |
| "first": "Mike", |
| "middle": [], |
| "last": "Lewis", |
| "suffix": "" |
| }, |
| { |
| "first": "Luke", |
| "middle": [], |
| "last": "Zettlemoyer", |
| "suffix": "" |
| }, |
| { |
| "first": "Veselin", |
| "middle": [], |
| "last": "Stoyanov", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXiv:1907.11692" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Yinhan Liu, Myle Ott, Naman Goyal, Jingfei Du, Man- dar Joshi, Danqi Chen, Omer Levy, Mike Lewis, Luke Zettlemoyer, and Veselin Stoyanov. 2019. Roberta: A robustly optimized bert pretraining approach. arXiv preprint arXiv:1907.11692.", |
| "links": null |
| }, |
| "BIBREF15": { |
| "ref_id": "b15", |
| "title": "Adversarial nli: A new benchmark for natural language understanding", |
| "authors": [ |
| { |
| "first": "Yixin", |
| "middle": [], |
| "last": "Nie", |
| "suffix": "" |
| }, |
| { |
| "first": "Adina", |
| "middle": [], |
| "last": "Williams", |
| "suffix": "" |
| }, |
| { |
| "first": "Emily", |
| "middle": [], |
| "last": "Dinan", |
| "suffix": "" |
| }, |
| { |
| "first": "Mohit", |
| "middle": [], |
| "last": "Bansal", |
| "suffix": "" |
| }, |
| { |
| "first": "Jason", |
| "middle": [], |
| "last": "Weston", |
| "suffix": "" |
| }, |
| { |
| "first": "Douwe", |
| "middle": [], |
| "last": "Kiela", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXiv:1910.14599" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Yixin Nie, Adina Williams, Emily Dinan, Mohit Bansal, Jason Weston, and Douwe Kiela. 2019. Adversarial nli: A new benchmark for natural language under- standing. arXiv preprint arXiv:1910.14599.", |
| "links": null |
| }, |
| "BIBREF16": { |
| "ref_id": "b16", |
| "title": "Beyond accuracy: Behavioral testing of NLP models with CheckList", |
| "authors": [ |
| { |
| "first": "Marco", |
| "middle": [ |
| "Tulio" |
| ], |
| "last": "Ribeiro", |
| "suffix": "" |
| }, |
| { |
| "first": "Tongshuang", |
| "middle": [], |
| "last": "Wu", |
| "suffix": "" |
| }, |
| { |
| "first": "Carlos", |
| "middle": [], |
| "last": "Guestrin", |
| "suffix": "" |
| }, |
| { |
| "first": "Sameer", |
| "middle": [], |
| "last": "Singh", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics", |
| "volume": "", |
| "issue": "", |
| "pages": "4902--4912", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Marco Tulio Ribeiro, Tongshuang Wu, Carlos Guestrin, and Sameer Singh. 2020. Beyond accuracy: Behav- ioral testing of NLP models with CheckList. In Pro- ceedings of the 58th Annual Meeting of the Association for Computational Linguistics, pages 4902-4912, On- line. Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF17": { |
| "ref_id": "b17", |
| "title": "Probing natural language inference models through semantic fragments", |
| "authors": [ |
| { |
| "first": "Kyle", |
| "middle": [], |
| "last": "Richardson", |
| "suffix": "" |
| }, |
| { |
| "first": "Hai", |
| "middle": [], |
| "last": "Hu", |
| "suffix": "" |
| }, |
| { |
| "first": "Lawrence", |
| "middle": [], |
| "last": "Moss", |
| "suffix": "" |
| }, |
| { |
| "first": "Ashish", |
| "middle": [], |
| "last": "Sabharwal", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "Proceedings of the AAAI Conference on Artificial Intelligence", |
| "volume": "34", |
| "issue": "", |
| "pages": "8713--8721", |
| "other_ids": { |
| "DOI": [ |
| "10.1609/aaai.v34i05.6397" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Kyle Richardson, Hai Hu, Lawrence Moss, and Ashish Sabharwal. 2020. Probing natural language infer- ence models through semantic fragments. Proceed- ings of the AAAI Conference on Artificial Intelligence, 34:8713-8721.", |
| "links": null |
| }, |
| "BIBREF18": { |
| "ref_id": "b18", |
| "title": "A logical-based corpus for cross-lingual evaluation", |
| "authors": [ |
| { |
| "first": "Felipe", |
| "middle": [], |
| "last": "Salvatore", |
| "suffix": "" |
| }, |
| { |
| "first": "Marcelo", |
| "middle": [], |
| "last": "Finger", |
| "suffix": "" |
| }, |
| { |
| "first": "Roberto", |
| "middle": [], |
| "last": "Hirata", |
| "suffix": "Jr." |
| } |
| ], |
| "year": 2019, |
| "venue": "Proceedings of the 2nd Workshop on Deep Learning Approaches for Low-Resource NLP", |
| "volume": "", |
| "issue": "", |
| "pages": "22--30", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Felipe Salvatore, Marcelo Finger, and Roberto Hirata Jr. 2019. A logical-based corpus for cross-lingual eval- uation. In Proceedings of the 2nd Workshop on Deep Learning Approaches for Low-Resource NLP (DeepLo 2019), pages 22-30.", |
| "links": null |
| }, |
| "BIBREF19": { |
| "ref_id": "b19", |
| "title": "On the stratification of multilabel data", |
| "authors": [ |
| { |
| "first": "Konstantinos", |
| "middle": [], |
| "last": "Sechidis", |
| "suffix": "" |
| }, |
| { |
| "first": "Grigorios", |
| "middle": [], |
| "last": "Tsoumakas", |
| "suffix": "" |
| }, |
| { |
| "first": "Ioannis", |
| "middle": [], |
| "last": "Vlahavas", |
| "suffix": "" |
| } |
| ], |
| "year": 2011, |
| "venue": "Machine Learning and Knowledge Discovery in Databases", |
| "volume": "", |
| "issue": "", |
| "pages": "145--158", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Konstantinos Sechidis, Grigorios Tsoumakas, and Ioan- nis Vlahavas. 2011. On the stratification of multi- label data. Machine Learning and Knowledge Discov- ery in Databases, pages 145-158.", |
| "links": null |
| }, |
| "BIBREF20": { |
| "ref_id": "b20", |
| "title": "The role of logic and ontology in language and reasoning", |
| "authors": [ |
| { |
| "first": "John", |
| "middle": [ |
| "F" |
| ], |
| "last": "Sowa", |
| "suffix": "" |
| } |
| ], |
| "year": 2010, |
| "venue": "Theory and applications of ontology: philosophical perspectives", |
| "volume": "", |
| "issue": "", |
| "pages": "231--263", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "John F Sowa. 2010. The role of logic and ontology in lan- guage and reasoning. In Theory and applications of ontology: philosophical perspectives, pages 231-263. Springer.", |
| "links": null |
| }, |
| "BIBREF21": { |
| "ref_id": "b21", |
| "title": "Glue: A multi-task benchmark and analysis platform for natural language understanding", |
| "authors": [ |
| { |
| "first": "Alex", |
| "middle": [], |
| "last": "Wang", |
| "suffix": "" |
| }, |
| { |
| "first": "Amanpreet", |
| "middle": [], |
| "last": "Singh", |
| "suffix": "" |
| }, |
| { |
| "first": "Julian", |
| "middle": [], |
| "last": "Michael", |
| "suffix": "" |
| }, |
| { |
| "first": "Felix", |
| "middle": [], |
| "last": "Hill", |
| "suffix": "" |
| }, |
| { |
| "first": "Omer", |
| "middle": [], |
| "last": "Levy", |
| "suffix": "" |
| }, |
| { |
| "first": "Samuel R", |
| "middle": [], |
| "last": "Bowman", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Alex Wang, Amanpreet Singh, Julian Michael, Felix Hill, Omer Levy, and Samuel R Bowman. 2018. Glue: A multi-task benchmark and analysis platform for natu- ral language understanding. EMNLP 2018, page 353.", |
| "links": null |
| }, |
| "BIBREF22": { |
| "ref_id": "b22", |
| "title": "A broad-coverage challenge corpus for sentence understanding through inference", |
| "authors": [ |
| { |
| "first": "Adina", |
| "middle": [], |
| "last": "Williams", |
| "suffix": "" |
| }, |
| { |
| "first": "Nikita", |
| "middle": [], |
| "last": "Nangia", |
| "suffix": "" |
| }, |
| { |
| "first": "Samuel", |
| "middle": [], |
| "last": "Bowman", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "Proceedings of the 2018 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies", |
| "volume": "1", |
| "issue": "", |
| "pages": "1112--1122", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Adina Williams, Nikita Nangia, and Samuel Bowman. 2018a. A broad-coverage challenge corpus for sen- tence understanding through inference. In Proceed- ings of the 2018 Conference of the North American Chapter of the Association for Computational Linguis- tics: Human Language Technologies, Volume 1 (Long Papers), pages 1112-1122. Association for Computa- tional Linguistics.", |
| "links": null |
| }, |
| "BIBREF23": { |
| "ref_id": "b23", |
| "title": "A broad-coverage challenge corpus for sentence understanding through inference", |
| "authors": [ |
| { |
| "first": "Adina", |
| "middle": [], |
| "last": "Williams", |
| "suffix": "" |
| }, |
| { |
| "first": "Nikita", |
| "middle": [], |
| "last": "Nangia", |
| "suffix": "" |
| }, |
| { |
| "first": "Samuel", |
| "middle": [], |
| "last": "Bowman", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "Proceedings of the 2018 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies", |
| "volume": "1", |
| "issue": "", |
| "pages": "1112--1122", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/N18-1101" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Adina Williams, Nikita Nangia, and Samuel Bowman. 2018b. A broad-coverage challenge corpus for sen- tence understanding through inference. In Proceed- ings of the 2018 Conference of the North American Chapter of the Association for Computational Linguis- tics: Human Language Technologies, Volume 1 (Long Papers), pages 1112-1122, New Orleans, Louisiana. Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF24": { |
| "ref_id": "b24", |
| "title": "Tractatus logico-philosophicus. London: Routledge", |
| "authors": [ |
| { |
| "first": "L", |
| "middle": [], |
| "last": "Wittgenstein", |
| "suffix": "" |
| } |
| ], |
| "year": 1922, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "L. Wittgenstein. 1922. Tractatus logico-philosophicus. London: Routledge, 1981.", |
| "links": null |
| } |
| }, |
| "ref_entries": { |
| "FIGREF0": { |
| "uris": null, |
| "type_str": "figure", |
| "num": null, |
| "text": "For each TAXINLI category and language, we plot the transfer gap on XLM-R, for both datasets." |
| }, |
| "FIGREF1": { |
| "uris": null, |
| "type_str": "figure", |
| "num": null, |
| "text": "For each TAXINLI category and language, we plot the transfer gap on mBERT, for both datasets." |
| }, |
| "FIGREF2": { |
| "uris": null, |
| "type_str": "figure", |
| "num": null, |
| "text": "LDA and Regression coefficients obtained to model the NLI prediction correctness on TaxiXNLI (translated) by XLM-R (left) and M-BERT (Right), given language features and reasoning categories. (SYN/negation: ***, Logic: *, p-value \u2264 0***, 0.001**, 0.01*)" |
| }, |
| "FIGREF3": { |
| "uris": null, |
| "type_str": "figure", |
| "num": null, |
| "text": "Few Shot Accuracy (TAXIXNLI (translated), XLM-R):For each TAXINLI category and language, we plot Accuracy on TAXIXNLI (translated) test, when XLM-R MultiNLI model is fine-tuned on N examples in that language and category" |
| }, |
| "FIGREF4": { |
| "uris": null, |
| "type_str": "figure", |
| "num": null, |
| "text": "Few Shot Accuracy (TAXIXNLI (diagnostic), XLM-R): For each TAXINLI category and language, we plot Accuracy on TAXIXNLI (diagnostic) test, when XLM-R MultiNLI model is fine-tuned on N examples in that language and category Is Transferability uniform across categories? Does more pre-training help? From" |
| }, |
| "FIGREF5": { |
| "uris": null, |
| "type_str": "figure", |
| "num": null, |
| "text": "Few Shot Accuracy (TAXIXNLI (translated), mBERT): For each TAXINLI category and language, we plot Accuracy on TAXIXNLI (translated) test, when mBERT MultiNLI model is fine-tuned on N examples in that language and categoryFigure 7: Few Shot Accuracy (TAXIXNLI (diagnostic), mBERT): For each TAXINLI category and language, we plot Accuracy on TAXIXNLI (diagnostic) test, when mBERT MultiNLI model is fine-tuned on N examples in that language and category R), whereas for categories such as knowledge and deductions the transfer gap is not reduced much." |
| }, |
| "TABREF1": { |
| "type_str": "table", |
| "content": "<table><tr><td>: We show the percentage of full and partial noisy</td></tr><tr><td>samples; F1, Accuracy scores with respect to the English</td></tr><tr><td>TAXINLI annotations for each language on the sampled</td></tr><tr><td>set of examples. We also include percentage of category</td></tr><tr><td>annotations where the annotated target category is zero,</td></tr><tr><td>while the corresponding category in English is 1.</td></tr></table>", |
| "text": "", |
| "html": null, |
| "num": null |
| }, |
| "TABREF2": { |
| "type_str": "table", |
| "content": "<table><tr><td>Category</td><td>P (eng)</td><td>H (eng)</td><td>P (target)</td><td>H (target)</td></tr><tr><td/><td/><td>Swahili</td><td/><td/></tr><tr><td>E1 world (eng)</td><td>Middle Ages</td><td>The time period during which the Black Plague happened.</td><td>Zama za kati</td><td>Kipindi cha wakati ambapo pigo la Black lilitokea.</td></tr><tr><td/><td/><td/><td>Karmeli Man, uhusiano wa</td><td/></tr><tr><td>E2 taxonomic (swa)</td><td/><td/><td>familia ya Neanderthal, aliishi</td><td>Karmeli Man si hai leo.</td></tr><tr><td/><td/><td/><td>hapa miaka 600,000 iliyopita.</td><td/></tr><tr><td>E3 spatial (swa)</td><td>I'm from New York.</td><td>I'm from Texas.</td><td>Mimi ni kutoka New York.</td><td>Mimi ni kutoka Texas.</td></tr><tr><td/><td>They need individual support to attend</td><td/><td>Wanahitaji msaada wa kibinafsi</td><td/></tr><tr><td>E4 temporal (eng)</td><td>conferences, present papers, publish their works, and keep in touch with others in their fields across the</td><td>They need a lot of funding to do their work effectively.</td><td>kuhudhuria mikutano, kuwasilisha makaratasi, kuchapisha kazi zao, na kuwasiliana na wengine katika</td><td>Wanahitaji fedha nyingi kufanya kazi zao kwa ufanisi.</td></tr><tr><td/><td>country.</td><td/><td>nyanja zao nchini kote.</td><td/></tr><tr><td>E5 temporal (swa)</td><td>So he says, No, from the school up, are all guerrillas.</td><td>He says everyone in this community is pacifistic.</td><td>Hivyo anasema, Hapana, kutoka shule, wote ni waasi.</td><td>Anasema kila mtu katika jamii hii ana mpango wa kutelezi.</td></tr><tr><td>E6 negation (eng)</td><td>So it's almost like a second home.</td><td>Alas, I have never been to such a place.</td><td>Hivyo ni karibu kama nyumba ya pili.</td><td>Ole, sijawahi kuwa na sehemu hiyo.</td></tr><tr><td>E7 negation (swa)</td><td>He argued that other extremists, who aimed at local rulers or Israel, did not go far enough.</td><td>He held the stance that the other extremists \nshould have went further.</td><td>Alisema kwamba wengine wenye siasa kali, ambao walikusudia viongozi wa ndani au Israeli, hawakwenda mbali sana.</td><td>Yeye alishikilia msimamo kwamba wengine wenye siasa kali wanapaswa kwenda zaidi.</td></tr></table>", |
| "text": "Carmel Man, a relation of the Neanderthal family, lived here 600,000 years ago.Carmel Man isn't alive today.", |
| "html": null, |
| "num": null |
| }, |
| "TABREF3": { |
| "type_str": "table", |
| "content": "<table><tr><td>Category</td><td>P (eng)</td><td>H (eng)</td><td>P (target)</td><td/><td>H (target)</td></tr><tr><td/><td/><td/><td>Hindi</td><td/><td/></tr><tr><td/><td>As a result, their services may be more</td><td>Their services might be more</td><td/><td/><td/></tr><tr><td>E1 world (eng)</td><td>effective when conducted in the</td><td>effective if they're done in the</td><td/><td/><td/></tr><tr><td/><td>emergency department environment.</td><td>ED.</td><td/><td/><td/></tr><tr><td>E2 world (hi)</td><td>Twenty islands have since been added, making the Cyclades the largest of the Greek island groups.</td><td>They have added just three more islands.</td><td/><td/><td/></tr><tr><td/><td>Favored by the Ancient Egyptians as</td><td/><td/><td/><td/></tr><tr><td>E3 taxonomic (eng)</td><td>a source of turquoise, the Sinai was, until recently, famed for only one event but certainly an important</td><td>The Sinai was a source of gold for the Ancient Egyptians.</td><td/><td/><td/></tr><tr><td/><td>one.</td><td/><td/><td/><td/></tr><tr><td/><td>'And I don't want to risk a fire fight</td><td/><td/><td/><td/></tr><tr><td>E4 taxonomic( hi)</td><td>with what appear to be horribly equal numbers.'</td><td>I don't want to get in a fight.</td><td>\u0928\u0939\u0940\u0902 \u0915\u0930\u0928\u093e \u091a\u093e\u0939\u0924\u0940 \u0964</td><td>\u091c\u094b \u0916\u092e</td><td>\u092e\u0948\u0902 \u0932\u095c\u093e\u0908 \u092e\u0947\u0902 \u0928\u0939\u0940\u0902 \u092a\u095c\u0928\u093e \u091a\u093e\u0939\u0924\u093e \u0964</td></tr><tr><td>E5 spatial (eng)</td><td>On the slopes of the hill you will find Edinburgh Zoo, located just behind Corstorphine Hospital.</td><td>Corstophine hospital is really far fromEdinburgh Zoo</td><td colspan=\"2\">\u092a\u0939\u093e\u095c\u0940 \u0915 \u0922\u0932\u093e\u0928\u094b\u0902 \u092a\u0930 \u0906\u092a\u0915\u094b \u090f \u0921\u0928\u092c\u0917\u0930\u094d \u091a\u093f\u095c\u092f\u093e\u0918\u0930 \u093f\u092e\u0932\u0947 
\u0917\u093e, \u091c\u094b \u0915\u0949\u0938\u094d\u091f\u0949 \u0930\u094d \u092b\u0928 \u0905\u0938\u094d\u092a\u0924\u093e\u0932 \u0915\u0947 \u0920\u0940\u0915 \u092a\u0940\u091b\u0947 \u0938\u094d\u0925\u0924 \u0939\u0948 \u0964</td><td>Corstophine \u0905\u0938\u094d\u092a\u0924\u093e\u0932 \u0935\u093e\u0938\u094d\u0924\u0935 \u092e\u0947\u0902 \u090f \u0921\u0928\u092c\u0917\u0930\u094d \u091a\u093f\u095c\u092f\u093e\u0918\u0930 \u0938\u0947 \u0926\u0942 \u0930 \u0939\u0948</td></tr><tr><td>E6 temporal (eng)</td><td>Both individuals agreed that the teleconference played no role in coordinating a response to the attacks of 9/11.</td><td>Neither party claimed that the phone meeting of the prior morning had been responsible for the actions taken in relation to the attacks on 9/11.</td><td/><td/><td/></tr><tr><td>E7 temporal (hi)</td><td>By 1993, Penney was using EDI for processing 97 percent of purchase orders and 85 percent of invoices with 3,400 of its 4,000 suppliers.</td><td>Penney was using EDI in 1993 for processing much of its supply information.</td><td colspan=\"2\">1993 \u0924\u0915, \u092a\u0947 \u0928\u0940 \u0916\u0930\u0940\u0926 \u0906\u0926\u0947 \u0936\u094b\u0902 \u0915\u0947 97 \u092a\u094d\u0930 \u0924\u0936\u0924 \u0914\u0930 85 \u092a\u094d\u0930 \u0924\u0936\u0924 \u091a\u093e\u0932\u093e\u0928\u094b\u0902 \u0915\u094b \u0905\u092a\u0928\u0947 4,000 \u0906\u092a\u0942 \u0924\u0915\u0924\u093e\u0930\u094d \u0913\u0902 \u0915\u0947 3,400 \u0915\u0947 \u0938\u093e\u0925 \u0938\u0902 \u0938\u093e \u0927\u0924 \u0915\u0930\u0928\u0947 \u0915\u0947 \u0932\u090f \u0908\u0921\u0940\u0906\u0908 \u0915\u093e \u0909\u092a\u092f\u094b\u0917 \u0915\u0930 \u0930\u0939\u093e \u0925\u093e\u0964</td><td>\u092a\u0947 \u0928\u0940 \u0905\u092a\u0928\u0940 \u0906\u092a\u0942 \u0924 \u0915 \u0905 \u0927\u0915\u093e\u0902 \u0936 \u091c\u093e\u0928\u0915\u093e\u0930\u0940 \u0915\u0947 \u092a\u094d\u0930\u0938\u0902 \u0938\u094d\u0915\u0930\u0923 \u0915\u0947 
\u0932\u090f 1993 \u092e\u0947\u0902 \u0908\u0926\u0940 \u0915\u093e \u0909\u092a\u092f\u094b\u0917 \u0915\u0930 \u0930\u0939\u093e \u0925\u093e\u0964</td></tr><tr><td>E8 negation (eng)</td><td/><td/><td/><td/><td/></tr></table>", |
| "text": "Interesting examples, where a category is marked in one language, but not in the other. The categories are mentioned in the second column, and in braces, we mention the language where the category is marked present. For example, in E1, category \"world\" is marked only in English. E1: a translation error, so marked neutral, E2: annotator needed to look up \"Karmeli man\", and understand relation with Neanderthal, E3: Swahili annotator used geographic reasoning, E4: English annotation is dubious, E5: wrong annotation in Swahili, E6: partial translation error, so neutral, E7: Swahili annotator misunderstood the sentence.\u0928\u0924\u0940\u091c\u0924\u0928, \u0906\u092a\u093e\u0924\u0915\u093e\u0932\u0940\u0928 \u093f\u0935\u092d\u093e\u0917 \u0915\u0947 \u092e\u093e\u0939\u094c\u0932 \u092e\u0947\u0902 \u0906\u092f\u094b \u091c\u0924 \u0939\u094b\u0928\u0947 \u092a\u0930 \u0909\u0928\u0915 \u0938\u0947 \u0935\u093e\u090f\u0902 \u0905 \u0927\u0915 \u092a\u094d\u0930\u092d\u093e\u0935\u0940 \u0939\u094b \u0938\u0915\u0924\u0940 \u0939\u0948\u0902 \u0964 \u092f\u093f\u0926 \u0935\u0947 \u0908\u0921\u0940 \u092e\u0947\u0902 \u093f\u0915\u090f \u091c\u093e\u0924\u0947 \u0939\u0948\u0902 \u0924\u094b \u0909\u0928\u0915 \u0938\u0947 \u0935\u093e\u090f\u0902 \u0905 \u0927\u0915 \u092a\u094d\u0930\u092d\u093e\u0935\u0940 \u0939\u094b \u0938\u0915\u0924\u0940 \u0939\u0948\u0902 \u0964 \u092c\u0940\u0938 \u0926\u094d\u0935\u0940\u092a\u094b\u0902 \u0915\u0947 \u092c\u093e\u0926 \u0938\u0947 \u091c\u094b\u095c\u093e \u0917\u092f\u093e \u0939\u0948 , Cyclades \u0917\u094d\u0930\u0940\u0915 \u0926\u094d\u0935\u0940\u092a \u0938\u092e\u0942 \u0939\u094b\u0902 \u0915\u093e \u0938\u092c\u0938\u0947 \u092c\u095c\u093e \u092c\u0928\u093e \u0964 \u0909\u0928\u094d\u0939\u094b\u0902\u0928\u0947 \u0938\u092b \u0930\u094d \u0924\u0940\u0928 \u0914\u0930 \u0926\u094d\u0935\u0940\u092a \u091c\u094b\u095c\u0947 \u0939\u0948\u0902 \u0964 \u0926\u094b\u0928\u094b\u0902 
\u0935\u094d\u092f\u093f\u0915\u094d\u0924\u092f\u094b\u0902 \u0928\u0947 \u0907\u0938 \u092c\u093e\u0924 \u092a\u0930 \u0938\u0939\u092e \u0924 \u0935\u094d\u092f\u0915\u094d\u0924 \u0915 \u093f\u0915 \u091f\u0947 \u0932\u0940\u0915\u093e\u0902 \u092b\u094d\u0930 \u0947\u0902 \u0938 \u0928\u0947 9/11 \u0915\u0947 \u0939\u092e\u0932\u094b\u0902 \u0915\u0947 \u092a\u094d\u0930\u0924\u094d\u092f\u0941 \u0924\u094d\u0924\u0930 \u0915\u0947 \u0938\u092e\u0928\u094d\u0935\u092f \u092e\u0947\u0902 \u0915\u094b\u0908 \u092d\u0942 \u093f\u092e\u0915\u093e \u0928\u0939\u0940\u0902 \u093f\u0928\u092d\u093e\u0908 \u0964 \u0928 \u0924\u094b \u092a\u093e\u091f \u0928\u0947 \u0926\u093e\u0935\u093e \u093f\u0915\u092f\u093e \u093f\u0915 \u092a\u0942 \u0935\u0930\u094d \u0938\u0941 \u092c\u0939 \u0915 \u092b\u094b\u0928 \u092c\u0948 \u0920\u0915 9/11 \u092a\u0930 \u0939\u092e\u0932\u094b\u0902 \u0915\u0947 \u0938\u0902 \u092c\u0902 \u0927 \u092e\u0947\u0902 \u0915 \u0917\u0908 \u0915\u093e\u0930\u0930\u094d \u0935\u093e\u0907\u092f\u094b\u0902 \u0915\u0947 \u0932\u090f \u091c\u092e\u094d\u092e\u0947 \u0926\u093e\u0930 \u0930\u0939\u0940 \u0925\u0940 \u0964", |
| "html": null, |
| "num": null |
| }, |
| "TABREF4": { |
| "type_str": "table", |
| "content": "<table><tr><td>E9 negation (hi)</td><td>CHAPTER 6: HUMAN CAPITAL</td><td>Capital is money, not people.</td><td>\u0905\u0927\u094d\u092f\u093e\u092f 6: \u092e\u093e\u0928\u0935 \u092a\u0942 \u0902 \u091c\u0940</td><td>\u092a\u0948 \u0938\u093e \u0939\u0948 , \u0932\u094b\u0917\u094b\u0902 \u0915\u094b \u0928\u0939\u0940\u0902\u0964</td></tr></table>", |
| "text": "\u092d\u0915\u0930\u094d \u092a, \u0906\u092e\u0924\u094c\u0930 \u092a\u0930 \u091a\u0942 \u0928\u093e \u092a\u0924\u094d\u0925\u0930 \u0915 \u0939\u0948\u0902 \u0921 \u0932\u0917 \u0914\u0930 \u092a\u094d\u0930\u0938\u0902 \u0938\u094d\u0915\u0930\u0923 \u0905\u0915\u094d\u0938\u0930 \u0911\u0928\u0938\u093e\u0907\u091f \u093f\u0915\u092f\u093e \u091c\u093e\u0924\u093e \u0939\u0948 , \u091c\u0948 \u0938\u093e \u093f\u0915 \u0905\u092a \u0936\u0937\u094d\u091f \u092f\u093e \u092a\u094d\u0930\u0938\u0902 \u0938\u094d\u0915\u0930\u0923 \u0915\u0947 \u0930\u0942\u092a \u092e\u0947\u0902 \u092c\u093f\u0939 \u093e\u0935 \u0915\u093e \u0909\u092a\u091a\u093e\u0930 \u090f\u0915 \u093f\u092c\u0915\u094d\u0930\u0940 \u092f\u094b\u0917\u094d\u092f \u0909\u0924\u094d\u092a\u093e\u0926 \u0909\u0926\u093e\u0939\u0930\u0923 \u0915\u0947 \u0932\u090f) \u092e\u0947\u0902 \u093f\u0915\u092f\u093e \u091c\u093e\u0924\u093e \u0939\u0948 \u0964 \u0905 \u092d\u0935\u093e\u0915\u094d \u0924 \u091a\u0942 \u0928\u093e \u092a\u0924\u094d\u0925\u0930 \u0915 \u0924\u0941 \u0932\u0928\u093e \u092e\u0947\u0902 \u0905 \u0927\u0915 \u092c\u093e\u0930 \u0939\u094b\u0924\u093e \u0939\u0948 , \u0914\u0930 \u0907\u0938\u0947 \u0911\u0928\u0938\u093e\u0907\u091f \u0938\u0902 \u092d\u093e\u0932\u093e \u0914\u0930 \u0938\u0902 \u0938\u093e \u0927\u0924 \u093f\u0915\u092f\u093e \u091c\u093e\u0924\u093e \u0939\u0964", |
| "html": null, |
| "num": null |
| }, |
| "TABREF5": { |
| "type_str": "table", |
| "content": "<table/>", |
| "text": "Similar examples in Hindi. The categories are mentioned in the second column, and in braces, we mention the language where the category is marked present.", |
| "html": null, |
| "num": null |
| }, |
| "TABREF6": { |
| "type_str": "table", |
| "content": "<table><tr><td colspan=\"4\">TAXINLI instructions annotators did not further an-</td></tr><tr><td colspan=\"4\">notate any categories; 2) TAXINLI reasoning cate-</td></tr><tr><td colspan=\"4\">gory annotations are subjective, as different anno-</td></tr><tr><td colspan=\"4\">tators can follow different reasoning process. For</td></tr><tr><td colspan=\"4\">example, for a P: I am from New York. H: I am</td></tr><tr><td colspan=\"4\">from Texas; the Swahili translation was perfect P:</td></tr><tr><td colspan=\"4\">Mimi ni kutoka New York., H: Mimi ni kutoka Texas..</td></tr><tr><td colspan=\"4\">But the Swahili annotator reasoned geographically,</td></tr><tr><td colspan=\"4\">i.e., N.Y is far from Texas, so annotated it as spa-</td></tr><tr><td colspan=\"4\">tial and the English annotator reasoned using world</td></tr><tr><td colspan=\"4\">knowledge; 3) sometimes translation also slightly</td></tr><tr><td colspan=\"4\">changed the word sense. For example, for the hy-</td></tr><tr><td colspan=\"4\">pothesis H: He held the stance that the other extrem-</td></tr><tr><td colspan=\"4\">ists should have went further., the translation was</td></tr><tr><td colspan=\"4\">Yeye alishikilia msimamo kwamba wengine wenye</td></tr><tr><td colspan=\"4\">siasa kali wanapaswa kwenda zaidi., which loosely</td></tr><tr><td colspan=\"4\">translates to \"extremists should have went further in</td></tr><tr><td colspan=\"4\">distance\". \nMore examples are in Table 2 and 3.</td></tr><tr><td colspan=\"3\">2.3 Train Test</td><td>Test</td></tr><tr><td>lexical</td><td colspan=\"2\">1362 1362</td><td>-</td></tr><tr><td>factivity</td><td>824</td><td>824</td><td>-</td></tr><tr><td>syntactic</td><td colspan=\"2\">1439 1438</td><td>-</td></tr><tr><td>negation</td><td>688</td><td>688</td><td>246</td></tr><tr><td>boolean</td><td>801</td><td>800</td><td>210</td></tr><tr><td>logic</td><td>895</td><td>896</td><td>207*</td></tr><tr><td colspan=\"3\">deductions 1019 1019</td><td>-</td></tr><tr><td>causal</td><td colspan=\"2\">1176 1177</td><td>61</td></tr><tr><td colspan=\"2\">knowledge 301</td><td>301</td><td>154</td></tr></table>", |
| "text": "", |
| "html": null, |
| "num": null |
| }, |
| "TABREF7": { |
| "type_str": "table", |
| "content": "<table><tr><td>cat-</td></tr></table>", |
| "text": "For both TAXIXNLI (translated) and TAXIXNLI (diagnostic), we report the number of training and test examples for each TAXINLI categories inTable 4. In the diagnostic set, we annotate a total 1435 examples, among which there were 650 examples, where no selected category was marked.", |
| "html": null, |
| "num": null |
| } |
| } |
| } |
| } |