| { |
| "paper_id": "2020", |
| "header": { |
| "generated_with": "S2ORC 1.0.0", |
| "date_generated": "2023-01-19T02:12:56.401590Z" |
| }, |
| "title": "Can Monolingual Pretrained Models Help Cross-Lingual Classification?", |
| "authors": [ |
| { |
| "first": "Zewen", |
| "middle": [], |
| "last": "Chi", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "Beijing Institute of Technology \u2021 Microsoft Research", |
| "location": {} |
| }, |
| "email": "" |
| }, |
| { |
| "first": "Li", |
| "middle": [], |
| "last": "Dong", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "Beijing Institute of Technology \u2021 Microsoft Research", |
| "location": {} |
| }, |
| "email": "lidong1@microsoft.com" |
| }, |
| { |
| "first": "Furu", |
| "middle": [], |
| "last": "Wei", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "Beijing Institute of Technology \u2021 Microsoft Research", |
| "location": {} |
| }, |
| "email": "fuwei@microsoft.com" |
| }, |
| { |
| "first": "Xian-Ling", |
| "middle": [], |
| "last": "Mao", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "Beijing Institute of Technology \u2021 Microsoft Research", |
| "location": {} |
| }, |
| "email": "" |
| }, |
| { |
| "first": "Heyan", |
| "middle": [], |
| "last": "Huang", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "Beijing Institute of Technology \u2021 Microsoft Research", |
| "location": {} |
| }, |
| "email": "" |
| } |
| ], |
| "year": "", |
| "venue": null, |
| "identifiers": {}, |
| "abstract": "Multilingual pretrained language models (such as multilingual BERT) have achieved impressive results for cross-lingual transfer. However, due to the constant model capacity, multilingual pre-training usually lags behind the monolingual competitors. In this work, we present two approaches to improve zero-shot cross-lingual classification, by transferring the knowledge from monolingual pretrained models to multilingual ones. Experimental results on two cross-lingual classification benchmarks show that our methods outperform vanilla multilingual fine-tuning.", |
| "pdf_parse": { |
| "paper_id": "2020", |
| "_pdf_hash": "", |
| "abstract": [ |
| { |
| "text": "Multilingual pretrained language models (such as multilingual BERT) have achieved impressive results for cross-lingual transfer. However, due to the constant model capacity, multilingual pre-training usually lags behind the monolingual competitors. In this work, we present two approaches to improve zero-shot cross-lingual classification, by transferring the knowledge from monolingual pretrained models to multilingual ones. Experimental results on two cross-lingual classification benchmarks show that our methods outperform vanilla multilingual fine-tuning.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Abstract", |
| "sec_num": null |
| } |
| ], |
| "body_text": [ |
| { |
| "text": "Supervised text classification heavily relies on manually annotated training data, while the data are usually only available in rich-resource languages, such as English. It requires great effort to make the resources available in other languages. Various methods have been proposed to build cross-lingual classification models by exploiting machine translation systems (Xu and Yang, 2017; Chen et al., 2018; Conneau et al., 2018) , and learning multilingual embeddings (Conneau et al., 2018; Artetxe and Schwenk, 2019; Eisenschlos et al., 2019) .", |
| "cite_spans": [ |
| { |
| "start": 369, |
| "end": 388, |
| "text": "(Xu and Yang, 2017;", |
| "ref_id": "BIBREF18" |
| }, |
| { |
| "start": 389, |
| "end": 407, |
| "text": "Chen et al., 2018;", |
| "ref_id": "BIBREF2" |
| }, |
| { |
| "start": 408, |
| "end": 429, |
| "text": "Conneau et al., 2018)", |
| "ref_id": "BIBREF7" |
| }, |
| { |
| "start": 469, |
| "end": 491, |
| "text": "(Conneau et al., 2018;", |
| "ref_id": "BIBREF7" |
| }, |
| { |
| "start": 492, |
| "end": 518, |
| "text": "Artetxe and Schwenk, 2019;", |
| "ref_id": "BIBREF1" |
| }, |
| { |
| "start": 519, |
| "end": 544, |
| "text": "Eisenschlos et al., 2019)", |
| "ref_id": "BIBREF9" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "Recently, multilingual pretrained language models have shown surprising cross-lingual effectiveness on a wide range of downstream tasks (Devlin et al., 2019; Conneau and Lample, 2019; Conneau et al., 2020; Chi et al., 2020a,b) . Even without using any parallel corpora, the pretrained models can still perform zero-shot cross-lingual classification (Pires et al., 2019; Wu and Dredze, 2019; Keung et al., 2019) . That is, these models can be fine-tuned in a source language, and then directly evaluated in other target languages. Despite * Contribution during internship at Microsoft Research. the effectiveness of cross-lingual transfer, the multilingual pretrained language models have their own drawbacks. Due to the constant number of model parameters, the model capacity of the richresource languages decreases if we adds languages for pre-training. The curse of multilinguality results in that the multilingual models usually perform worse than their monolingual competitors on downstream tasks (Arivazhagan et al., 2019; Conneau et al., 2020) . The observations motivate us to leverage monolingual pretrained models to improve multilingual models for cross-lingual classification.", |
| "cite_spans": [ |
| { |
| "start": 136, |
| "end": 157, |
| "text": "(Devlin et al., 2019;", |
| "ref_id": "BIBREF8" |
| }, |
| { |
| "start": 158, |
| "end": 183, |
| "text": "Conneau and Lample, 2019;", |
| "ref_id": "BIBREF6" |
| }, |
| { |
| "start": 184, |
| "end": 205, |
| "text": "Conneau et al., 2020;", |
| "ref_id": "BIBREF5" |
| }, |
| { |
| "start": 206, |
| "end": 226, |
| "text": "Chi et al., 2020a,b)", |
| "ref_id": null |
| }, |
| { |
| "start": 349, |
| "end": 369, |
| "text": "(Pires et al., 2019;", |
| "ref_id": "BIBREF13" |
| }, |
| { |
| "start": 370, |
| "end": 390, |
| "text": "Wu and Dredze, 2019;", |
| "ref_id": "BIBREF17" |
| }, |
| { |
| "start": 391, |
| "end": 410, |
| "text": "Keung et al., 2019)", |
| "ref_id": "BIBREF11" |
| }, |
| { |
| "start": 1001, |
| "end": 1027, |
| "text": "(Arivazhagan et al., 2019;", |
| "ref_id": "BIBREF0" |
| }, |
| { |
| "start": 1028, |
| "end": 1049, |
| "text": "Conneau et al., 2020)", |
| "ref_id": "BIBREF5" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "In this paper, we propose a multilingual finetuning method (MONOX) based on the teacherstudent framework, where a multilingual student model learns end task skills from a monolingual teacher. Intuitively, monolingual pretrained models are used to provide supervision of downstream tasks, while multilingual models are employed for knowledge transfer across languages. We conduct experiments on two widely used cross-lingual classification datasets, where our methods outperform baseline models on zero-shot cross-lingual classification. Moreover, we show that the monolingual teacher model can help the student multilingual model for both the source language and target languages, even though the student model is only trained in the source language.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "We use multilingual BERT (Devlin et al., 2019) for multilingual pretrained language models. The pretrained model uses the BERT-style Transformer (Vaswani et al., 2017) architecture, and follows the similar fine-tuning procedure as BERT for text classification, which is illustrated in Figure 1(a) . To be specific, the first input token of the models is always a special classification token [CLS]. During fine-tuning, the final hidden state of the special token is used as the sentence representation. In order to output predictions, an additional softmax classifier is built on top of the sentence representation. Denoting D as the training data in the source language, the pretrained models are fine-tuned with standard cross-entropy loss:", |
| "cite_spans": [ |
| { |
| "start": 25, |
| "end": 46, |
| "text": "(Devlin et al., 2019)", |
| "ref_id": "BIBREF8" |
| }, |
| { |
| "start": 145, |
| "end": 167, |
| "text": "(Vaswani et al., 2017)", |
| "ref_id": "BIBREF16" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 285, |
| "end": 296, |
| "text": "Figure 1(a)", |
| "ref_id": "FIGREF0" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Background: Multilingual Fine-Tuning", |
| "sec_num": "2" |
| }, |
| { |
| "text": "L CE (\u03b8; D) = \u2212 (x,y)\u2208D log p(y|x; \u03b8)", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Background: Multilingual Fine-Tuning", |
| "sec_num": "2" |
| }, |
| { |
| "text": "where \u03b8 represents model parameters. Then the model is directly evaluated on other languages for cross-lingual classification.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Background: Multilingual Fine-Tuning", |
| "sec_num": "2" |
| }, |
| { |
| "text": "As shown in Figure 1 (b), we first fine-tune the monolingual pretrained model in the source language. Then we transfer task knowledge to the multilingual pretrained model by soft (Section 3.1) or hard (Section 3.2) labels. We describe two variants of our proposed method (MONOX) as follows.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 12, |
| "end": 20, |
| "text": "Figure 1", |
| "ref_id": "FIGREF0" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Methods", |
| "sec_num": "3" |
| }, |
| { |
| "text": "In order to transfer task-specific knowledge from monolingual model to multilingual model, we propose to use knowledge distillation (Hinton et al., 2015) under our MONOX framework, where a student model s is trained with soft labels generated by a better-learned teacher model t. The loss function of the student model is:", |
| "cite_spans": [ |
| { |
| "start": 132, |
| "end": 153, |
| "text": "(Hinton et al., 2015)", |
| "ref_id": "BIBREF10" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Knowledge Distillation", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "L KD (\u03b8 s ; D, \u03b8 t ) = \u2212 (x,y)\u2208D K k=1 q(y = k|x; \u03b8 t ) log p(y = k|x; \u03b8 s )", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Knowledge Distillation", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "where p(\u2022) and q(\u2022) represent the probability distribution over K categories, predicted by the student s and the teacher t, respectively. Notice that only the student model parameters \u03b8 s are updated during knowledge distillation. As shown in Figure 1 (b), we first use the fine-tuned monolingual pretrained model as a teacher, which is learned by minimizing L CE (\u03b8 t ; D). Then we perform knowledge distillation for the student model with L KD (\u03b8 s ; D C , \u03b8 t ) as the loss function, where D C is the concatenation of training dataset and the unlabeled dataset in the source language. We denote this implementation as MONOX-KD.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 243, |
| "end": 251, |
| "text": "Figure 1", |
| "ref_id": "FIGREF0" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Knowledge Distillation", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "In addition to knowledge distillation, we also consider implementing MONOX by training the student multilingual model with pseudo-label (Lee, 2013) . Specifically, after fine-tuning the monolingual pretrained model on the training data as teacher, we apply the teacher model on the unlabeled data in the source language to generate pseudo labels. Next, we filter the pseudo labels by a prediction confidence threshold, and only keep the examples with higher confidence scores. Notice that the pseudo training data are assigned with hard labels. Finally, we concatenate the original training data and the pseudo data as the final training set for the student model. We denote this implementation as MONOX-PL.", |
| "cite_spans": [ |
| { |
| "start": 136, |
| "end": 147, |
| "text": "(Lee, 2013)", |
| "ref_id": "BIBREF12" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Pseudo-Label", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "In the following experiments, we consider the zero-shot cross-lingual setting, where models are trained with English data and directly evaluated on all target languages.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Experimental Setup", |
| "sec_num": "4.1" |
| }, |
| { |
| "text": "We conduct experiments on two widely used datasets for cross-lingual evaluation:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Datasets", |
| "sec_num": null |
| }, |
| { |
| "text": "(1) Cross-Lingual Sentiment (CLS) dataset (Prettenhofer and Stein, 2010), containing Amazon reviews in three domains and four languages;", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Datasets", |
| "sec_num": null |
| }, |
| { |
| "text": "(2) Cross-Lingual NLI (XNLI) dataset (Conneau et al., 2018) , containing development and test sets in 15 languages and a training set in English for the natural language inference task.", |
| "cite_spans": [ |
| { |
| "start": 37, |
| "end": 59, |
| "text": "(Conneau et al., 2018)", |
| "ref_id": "BIBREF7" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Datasets", |
| "sec_num": null |
| }, |
| { |
| "text": "We use multilingual BERT BASE 1 for cross-lingual transfer. For monolingual pretrained language model, the English-version RoBERTa LARGE 2 is employed. All the pretrained models used in our experiments are cased models.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Pretrained Language Models", |
| "sec_num": null |
| }, |
| { |
| "text": "Baselines We compare our methods (MONOX-KD, and MONOX-PL) with the following models:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Pretrained Language Models", |
| "sec_num": null |
| }, |
| { |
| "text": "\u2022 MBERT: directly fine-tuning the multilingual BERT BASE with English training data.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Pretrained Language Models", |
| "sec_num": null |
| }, |
| { |
| "text": "\u2022 MBERT-ST: fine-tuning the multilingual BERT BASE by self-training, i.e., alternately fine-tuning mBERT and updating the training data by labeling English unlabeled examples.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Pretrained Language Models", |
| "sec_num": null |
| }, |
| { |
| "text": "For the CLS dataset, we randomly select 20% examples from training data as the development set and use the remaining examples as the training set. For XNLI, we randomly sample 20% examples from training data as the training set, and regard the other examples as the unlabeled set. We use the vocabularies provided by the pretrained models, which are extracted by Byte-Pair Encoding (Sennrich et al., 2016) . The input sentences are truncated to 256 tokens. For both datasets, we use Adam optimizer with a learning rate of 5 \u00d7 10 \u22126 , and a batch size of 8. We train models with epoch size of 200 and 2,500 steps for CLS and XNLI, respectively. For MONOX-KD, the softmax temperature of knowledge distillation is set to 0.1. For MONOX-PL, the confidence threshold is set to zero, which means all of the generated pseudo labels are used as training data.", |
| "cite_spans": [ |
| { |
| "start": 382, |
| "end": 405, |
| "text": "(Sennrich et al., 2016)", |
| "ref_id": "BIBREF15" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Configuration", |
| "sec_num": "4.2" |
| }, |
| { |
| "text": "Preliminary Experiments To see how much monolingual pretrained models is better than multilingual pretrained models, we finetune several different pretrained language models on the two datasets under the aforementioned configuration, and only evaluate them in English. As shown in Table 1 , the gap between multilingual and monolingual pretrained models is large, even when using the same size of parameters. It is not hard to explain because MBERT is trained in 104 languages, where different languages tend to confuse each other.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 281, |
| "end": 288, |
| "text": "Table 1", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Results and Discussion", |
| "sec_num": "4.3" |
| }, |
| { |
| "text": "We evaluate our method on the zero-shot cross-lingual sentiment classification task. The goal of sentiment classification is to classify input sentences to positive or negative sentiments. In Table 2 we compare the results of our methods with baselines on CLS. It can be observed that our MONOX method outperforms baselines in all evaluated languages and domains, providing 4.91% improvement of averaged accuracy to the original multilingual BERT fine-tuning method. Notice that MBERT-ST is trained under the same condition with our method, i.e., using the same labeled and unlabeled data as ours. However, we only observe a slight improvement over MBERT, which demonstrates that the performance improvement of MONOX mainly benefits from its end task knowledge transfer rather than the unlabeled data.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 192, |
| "end": 199, |
| "text": "Table 2", |
| "ref_id": "TABREF2" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Sentiment Classification", |
| "sec_num": null |
| }, |
| { |
| "text": "We also evaluate our method on the zero-shot cross-lingual NLI task, which is more challenging than sentiment classification. The goal of NLI is to identify the relationship of a pair of input sentences, including a premise and a hypothesis with an entailment, contradiction, or neutral relationship between them. As shown in Table 3 : Evaluation results of zero-shot cross-lingual NLI on the XNLI dataset. Note that 20% of the original training data are used as training set, and the other 80% are used as unlabeled set.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 326, |
| "end": 333, |
| "text": "Table 3", |
| "ref_id": "TABREF1" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Natural Language Inference", |
| "sec_num": null |
| }, |
| { |
| "text": "10 1 10 2 10 3 10 4 10 5 Training Data Size fully helps the multilingual pretrained model gain end task knowledge from the monolingual pretrained model for cross-lingual classification. It is also worth mentioning that the performance of MBERT-ST is similar to MBERT. We believe the reason is that XNLI has more training data than CLS, which wakens the impact of self-training.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Natural Language Inference", |
| "sec_num": null |
| }, |
| { |
| "text": "We conduct a study on how much multilingual pretrained model can learn from monolingual pretrained model for different training data size. We cut the training data to 10, 100, 1K, 10K and 78K (full training data in our setting) examples, and keep other hyper-parameters fixed. In Figure 2 , we show the averaged accuracy scores for zero-shot XNLI with different training data sizes. We observe that MONOX outperforms MBERT on all data sizes except the 10-example setting. When the training data is relatively small (\u2264 10 4 ), our method shows 10 3 10 2 10 1 10 0 10 1 10 2 Distillation Temperature 61.5 62.0 Figure 3 presents XNLI averaged accuracy scores of MONOX-KD with different softmax temperatures in knowledge distillation. Even though the temperature varies from 10 \u22123 to 10 2 , all of the results are higher than baseline scores, which indicates MONOX-KD is nonsensitive to the temperature. When the temperature is set to 10 \u22121 , we observe the best results on the development set. Therefore we set temperature as 0.1 in other experiments.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 280, |
| "end": 288, |
| "text": "Figure 2", |
| "ref_id": "FIGREF1" |
| }, |
| { |
| "start": 608, |
| "end": 616, |
| "text": "Figure 3", |
| "ref_id": "FIGREF2" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Effects of Training Data Size", |
| "sec_num": null |
| }, |
| { |
| "text": "In this work, we investigated whether a monolingual pretrained model can help cross-lingual classification. Our results have shown that, with a RoBERTa model pretrained in English, we can boost the classification performance of a pretrained multilingual BERT in other languages. For future work, we will explore whether mono-lingual pretrained models can help other crosslingual NLP tasks, such as natural language generation (Chi et al., 2020a) .", |
| "cite_spans": [ |
| { |
| "start": 426, |
| "end": 445, |
| "text": "(Chi et al., 2020a)", |
| "ref_id": "BIBREF3" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Conclusion", |
| "sec_num": "5" |
| }, |
| { |
| "text": "https://github.com/google-research/ bert/blob/master/multilingual.md 2 https://github.com/pytorch/fairseq/ tree/master/examples/roberta", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| } |
| ], |
| "back_matter": [ |
| { |
| "text": "Prof. Heyan Huang is the corresponding author. The work is supported by National Key R&D Plan (No. 2016QY03D0602), NSFC (No. U19B2020, 61772076, 61751201 and 61602197) and NSFB (No. Z181100008918002).", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Acknowledgements", |
| "sec_num": null |
| } |
| ], |
| "bib_entries": { |
| "BIBREF0": { |
| "ref_id": "b0", |
| "title": "Massively multilingual neural machine translation in the wild: Findings and challenges", |
| "authors": [ |
| { |
| "first": "Naveen", |
| "middle": [], |
| "last": "Arivazhagan", |
| "suffix": "" |
| }, |
| { |
| "first": "Ankur", |
| "middle": [], |
| "last": "Bapna", |
| "suffix": "" |
| }, |
| { |
| "first": "Orhan", |
| "middle": [], |
| "last": "Firat", |
| "suffix": "" |
| }, |
| { |
| "first": "M", |
| "middle": [ |
| "Gatu" |
| ], |
| "last": "Dmitry Lepikhin", |
| "suffix": "" |
| }, |
| { |
| "first": "Maxim", |
| "middle": [], |
| "last": "Johnson", |
| "suffix": "" |
| }, |
| { |
| "first": "Mia", |
| "middle": [ |
| "Xu" |
| ], |
| "last": "Krikun", |
| "suffix": "" |
| }, |
| { |
| "first": "Yuan", |
| "middle": [], |
| "last": "Chen", |
| "suffix": "" |
| }, |
| { |
| "first": "George", |
| "middle": [], |
| "last": "Cao", |
| "suffix": "" |
| }, |
| { |
| "first": "Colin", |
| "middle": [], |
| "last": "Foster", |
| "suffix": "" |
| }, |
| { |
| "first": "Wolfgang", |
| "middle": [], |
| "last": "Cherry", |
| "suffix": "" |
| }, |
| { |
| "first": "Zhifeng", |
| "middle": [], |
| "last": "Macherey", |
| "suffix": "" |
| }, |
| { |
| "first": "Yonghui", |
| "middle": [], |
| "last": "Chen", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Wu", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "ArXiv", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Naveen Arivazhagan, Ankur Bapna, Orhan Firat, Dmitry Lepikhin, M. Gatu Johnson, Maxim Krikun, Mia Xu Chen, Yuan Cao, George Foster, Colin Cherry, Wolfgang Macherey, Zhifeng Chen, and Yonghui Wu. 2019. Massively multilingual neural machine translation in the wild: Findings and chal- lenges. ArXiv, abs/1907.05019.", |
| "links": null |
| }, |
| "BIBREF1": { |
| "ref_id": "b1", |
| "title": "Massively multilingual sentence embeddings for zeroshot cross-lingual transfer and beyond", |
| "authors": [ |
| { |
| "first": "Mikel", |
| "middle": [], |
| "last": "Artetxe", |
| "suffix": "" |
| }, |
| { |
| "first": "Holger", |
| "middle": [], |
| "last": "Schwenk", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "Transactions of the Association for Computational Linguistics", |
| "volume": "7", |
| "issue": "", |
| "pages": "597--610", |
| "other_ids": { |
| "DOI": [ |
| "10.1162/tacl_a_00288" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Mikel Artetxe and Holger Schwenk. 2019. Mas- sively multilingual sentence embeddings for zero- shot cross-lingual transfer and beyond. Transac- tions of the Association for Computational Linguis- tics, 7:597-610.", |
| "links": null |
| }, |
| "BIBREF2": { |
| "ref_id": "b2", |
| "title": "Adversarial deep averaging networks for cross-lingual sentiment classification", |
| "authors": [ |
| { |
| "first": "Xilun", |
| "middle": [], |
| "last": "Chen", |
| "suffix": "" |
| }, |
| { |
| "first": "Yu", |
| "middle": [], |
| "last": "Sun", |
| "suffix": "" |
| }, |
| { |
| "first": "Ben", |
| "middle": [], |
| "last": "Athiwaratkun", |
| "suffix": "" |
| }, |
| { |
| "first": "Claire", |
| "middle": [], |
| "last": "Cardie", |
| "suffix": "" |
| }, |
| { |
| "first": "Kilian", |
| "middle": [], |
| "last": "Weinberger", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "Transactions of the Association for Computational Linguistics", |
| "volume": "6", |
| "issue": "", |
| "pages": "557--570", |
| "other_ids": { |
| "DOI": [ |
| "10.1162/tacl_a_00039" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Xilun Chen, Yu Sun, Ben Athiwaratkun, Claire Cardie, and Kilian Weinberger. 2018. Adversarial deep av- eraging networks for cross-lingual sentiment classi- fication. Transactions of the Association for Com- putational Linguistics, 6:557-570.", |
| "links": null |
| }, |
| "BIBREF3": { |
| "ref_id": "b3", |
| "title": "Cross-lingual natural language generation via pre-training", |
| "authors": [ |
| { |
| "first": "Zewen", |
| "middle": [], |
| "last": "Chi", |
| "suffix": "" |
| }, |
| { |
| "first": "Li", |
| "middle": [], |
| "last": "Dong", |
| "suffix": "" |
| }, |
| { |
| "first": "Furu", |
| "middle": [], |
| "last": "Wei", |
| "suffix": "" |
| }, |
| { |
| "first": "Wenhui", |
| "middle": [], |
| "last": "Wang", |
| "suffix": "" |
| }, |
| { |
| "first": "Xian-Ling", |
| "middle": [], |
| "last": "Mao", |
| "suffix": "" |
| }, |
| { |
| "first": "Heyan", |
| "middle": [], |
| "last": "Huang", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "The Thirty-Fourth AAAI Conference on Artificial Intelligence", |
| "volume": "", |
| "issue": "", |
| "pages": "7570--7577", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Zewen Chi, Li Dong, Furu Wei, Wenhui Wang, Xian- Ling Mao, and Heyan Huang. 2020a. Cross-lingual natural language generation via pre-training. In The Thirty-Fourth AAAI Conference on Artificial Intelli- gence, pages 7570-7577. AAAI Press.", |
| "links": null |
| }, |
| "BIBREF4": { |
| "ref_id": "b4", |
| "title": "In-foXLM: An information-theoretic framework for cross-lingual language model pre-training", |
| "authors": [ |
| { |
| "first": "Zewen", |
| "middle": [], |
| "last": "Chi", |
| "suffix": "" |
| }, |
| { |
| "first": "Li", |
| "middle": [], |
| "last": "Dong", |
| "suffix": "" |
| }, |
| { |
| "first": "Furu", |
| "middle": [], |
| "last": "Wei", |
| "suffix": "" |
| }, |
| { |
| "first": "Nan", |
| "middle": [], |
| "last": "Yang", |
| "suffix": "" |
| }, |
| { |
| "first": "Saksham", |
| "middle": [], |
| "last": "Singhal", |
| "suffix": "" |
| }, |
| { |
| "first": "Wenhui", |
| "middle": [], |
| "last": "Wang", |
| "suffix": "" |
| }, |
| { |
| "first": "Xia", |
| "middle": [], |
| "last": "Song", |
| "suffix": "" |
| }, |
| { |
| "first": "Xian-Ling", |
| "middle": [], |
| "last": "Mao", |
| "suffix": "" |
| }, |
| { |
| "first": "Heyan", |
| "middle": [], |
| "last": "Huang", |
| "suffix": "" |
| }, |
| { |
| "first": "Ming", |
| "middle": [], |
| "last": "Zhou", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "ArXiv", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Zewen Chi, Li Dong, Furu Wei, Nan Yang, Sak- sham Singhal, Wenhui Wang, Xia Song, Xian-Ling Mao, Heyan Huang, and Ming Zhou. 2020b. In- foXLM: An information-theoretic framework for cross-lingual language model pre-training. ArXiv, abs/2007.07834.", |
| "links": null |
| }, |
| "BIBREF5": { |
| "ref_id": "b5", |
| "title": "Unsupervised cross-lingual representation learning at scale", |
| "authors": [ |
| { |
| "first": "Alexis", |
| "middle": [], |
| "last": "Conneau", |
| "suffix": "" |
| }, |
| { |
| "first": "Kartikay", |
| "middle": [], |
| "last": "Khandelwal", |
| "suffix": "" |
| }, |
| { |
| "first": "Naman", |
| "middle": [], |
| "last": "Goyal", |
| "suffix": "" |
| }, |
| { |
| "first": "Vishrav", |
| "middle": [], |
| "last": "Chaudhary", |
| "suffix": "" |
| }, |
| { |
| "first": "Guillaume", |
| "middle": [], |
| "last": "Wenzek", |
| "suffix": "" |
| }, |
| { |
| "first": "Francisco", |
| "middle": [], |
| "last": "Guzm\u00e1n", |
| "suffix": "" |
| }, |
| { |
| "first": "Edouard", |
| "middle": [], |
| "last": "Grave", |
| "suffix": "" |
| }, |
| { |
| "first": "Myle", |
| "middle": [], |
| "last": "Ott", |
| "suffix": "" |
| }, |
| { |
| "first": "Luke", |
| "middle": [], |
| "last": "Zettlemoyer", |
| "suffix": "" |
| }, |
| { |
| "first": "Veselin", |
| "middle": [], |
| "last": "Stoyanov", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics", |
| "volume": "", |
| "issue": "", |
| "pages": "8440--8451", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/2020.acl-main.747" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Alexis Conneau, Kartikay Khandelwal, Naman Goyal, Vishrav Chaudhary, Guillaume Wenzek, Francisco Guzm\u00e1n, Edouard Grave, Myle Ott, Luke Zettle- moyer, and Veselin Stoyanov. 2020. Unsupervised cross-lingual representation learning at scale. In Proceedings of the 58th Annual Meeting of the Asso- ciation for Computational Linguistics, pages 8440- 8451, Online. Association for Computational Lin- guistics.", |
| "links": null |
| }, |
| "BIBREF6": { |
| "ref_id": "b6", |
| "title": "Crosslingual language model pretraining", |
| "authors": [ |
| { |
| "first": "Alexis", |
| "middle": [], |
| "last": "Conneau", |
| "suffix": "" |
| }, |
| { |
| "first": "Guillaume", |
| "middle": [], |
| "last": "Lample", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "Advances in Neural Information Processing Systems", |
| "volume": "", |
| "issue": "", |
| "pages": "7057--7067", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Alexis Conneau and Guillaume Lample. 2019. Cross- lingual language model pretraining. In Advances in Neural Information Processing Systems, pages 7057-7067. Curran Associates, Inc.", |
| "links": null |
| }, |
| "BIBREF7": { |
| "ref_id": "b7", |
| "title": "Xnli: Evaluating crosslingual sentence representations", |
| "authors": [ |
| { |
| "first": "Alexis", |
| "middle": [], |
| "last": "Conneau", |
| "suffix": "" |
| }, |
| { |
| "first": "Ruty", |
| "middle": [], |
| "last": "Rinott", |
| "suffix": "" |
| }, |
| { |
| "first": "Guillaume", |
| "middle": [], |
| "last": "Lample", |
| "suffix": "" |
| }, |
| { |
| "first": "Adina", |
| "middle": [], |
| "last": "Williams", |
| "suffix": "" |
| }, |
| { |
| "first": "Samuel", |
| "middle": [ |
| "R" |
| ], |
| "last": "Bowman", |
| "suffix": "" |
| }, |
| { |
| "first": "Holger", |
| "middle": [], |
| "last": "Schwenk", |
| "suffix": "" |
| }, |
| { |
| "first": "Veselin", |
| "middle": [], |
| "last": "Stoyanov", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "Proceedings of the 2018 Conference on Empirical Methods in Natural Language Processing. Association for Computational Linguistics", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Alexis Conneau, Ruty Rinott, Guillaume Lample, Ad- ina Williams, Samuel R. Bowman, Holger Schwenk, and Veselin Stoyanov. 2018. Xnli: Evaluating cross- lingual sentence representations. In Proceedings of the 2018 Conference on Empirical Methods in Nat- ural Language Processing. Association for Compu- tational Linguistics.", |
| "links": null |
| }, |
| "BIBREF8": { |
| "ref_id": "b8", |
| "title": "BERT: Pre-training of deep bidirectional transformers for language understanding", |
| "authors": [ |
| { |
| "first": "Jacob", |
| "middle": [], |
| "last": "Devlin", |
| "suffix": "" |
| }, |
| { |
| "first": "Ming-Wei", |
| "middle": [], |
| "last": "Chang", |
| "suffix": "" |
| }, |
| { |
| "first": "Kenton", |
| "middle": [], |
| "last": "Lee", |
| "suffix": "" |
| }, |
| { |
| "first": "Kristina", |
| "middle": [], |
| "last": "Toutanova", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "Proceedings of the 2019 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies", |
| "volume": "1", |
| "issue": "", |
| "pages": "4171--4186", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/N19-1423" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Jacob Devlin, Ming-Wei Chang, Kenton Lee, and Kristina Toutanova. 2019. BERT: Pre-training of deep bidirectional transformers for language under- standing. In Proceedings of the 2019 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, Volume 1 (Long and Short Papers), pages 4171-4186, Minneapolis, Minnesota. Associ- ation for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF9": { |
| "ref_id": "b9", |
| "title": "MultiFiT: Efficient multi-lingual language model fine-tuning", |
| "authors": [ |
| { |
| "first": "Julian", |
| "middle": [], |
| "last": "Eisenschlos", |
| "suffix": "" |
| }, |
| { |
| "first": "Sebastian", |
| "middle": [], |
| "last": "Ruder", |
| "suffix": "" |
| }, |
| { |
| "first": "Piotr", |
| "middle": [], |
| "last": "Czapla", |
| "suffix": "" |
| }, |
| { |
| "first": "Marcin", |
| "middle": [], |
| "last": "Kadras", |
| "suffix": "" |
| }, |
| { |
| "first": "Sylvain", |
| "middle": [], |
| "last": "Gugger", |
| "suffix": "" |
| }, |
| { |
| "first": "Jeremy", |
| "middle": [], |
| "last": "Howard", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "Proceedings of the 2019 Conference on Empirical Methods in Natural Language Processing and the 9th International Joint Conference on Natural Language Processing (EMNLP-IJCNLP)", |
| "volume": "", |
| "issue": "", |
| "pages": "5702--5707", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/D19-1572" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Julian Eisenschlos, Sebastian Ruder, Piotr Czapla, Marcin Kadras, Sylvain Gugger, and Jeremy Howard. 2019. MultiFiT: Efficient multi-lingual language model fine-tuning. In Proceedings of the 2019 Conference on Empirical Methods in Natu- ral Language Processing and the 9th International Joint Conference on Natural Language Processing (EMNLP-IJCNLP), pages 5702-5707, Hong Kong, China. Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF10": { |
| "ref_id": "b10", |
| "title": "Distilling the knowledge in a neural network", |
| "authors": [ |
| { |
| "first": "Geoffrey", |
| "middle": [ |
| "E" |
| ], |
| "last": "Hinton", |
| "suffix": "" |
| }, |
| { |
| "first": "Oriol", |
| "middle": [], |
| "last": "Vinyals", |
| "suffix": "" |
| }, |
| { |
| "first": "Jeffrey", |
| "middle": [], |
| "last": "Dean", |
| "suffix": "" |
| } |
| ], |
| "year": 2015, |
| "venue": "ArXiv", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Geoffrey E. Hinton, Oriol Vinyals, and Jeffrey Dean. 2015. Distilling the knowledge in a neural network. ArXiv, abs/1503.02531.", |
| "links": null |
| }, |
| "BIBREF11": { |
| "ref_id": "b11", |
| "title": "Adversarial learning with contextual embeddings for zero-resource cross-lingual classification and NER", |
| "authors": [ |
| { |
| "first": "Phillip", |
| "middle": [], |
| "last": "Keung", |
| "suffix": "" |
| }, |
| { |
| "first": "Yichao", |
| "middle": [], |
| "last": "Lu", |
| "suffix": "" |
| }, |
| { |
| "first": "Vikas", |
| "middle": [], |
| "last": "Bhardwaj", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "Proceedings of the 2019 Conference on Empirical Methods in Natural Language Processing and the 9th International Joint Conference on Natural Language Processing (EMNLP-IJCNLP)", |
| "volume": "", |
| "issue": "", |
| "pages": "1355--1360", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/D19-1138" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Phillip Keung, Yichao Lu, and Vikas Bhardwaj. 2019. Adversarial learning with contextual embeddings for zero-resource cross-lingual classification and NER. In Proceedings of the 2019 Conference on Empirical Methods in Natural Language Processing and the 9th International Joint Conference on Natural Lan- guage Processing (EMNLP-IJCNLP), pages 1355- 1360, Hong Kong, China. Association for Computa- tional Linguistics.", |
| "links": null |
| }, |
| "BIBREF12": { |
| "ref_id": "b12", |
| "title": "Pseudo-label : The simple and efficient semi-supervised learning method for deep neural networks", |
| "authors": [ |
| { |
| "first": "Dong-Hyun", |
| "middle": [], |
| "last": "Lee", |
| "suffix": "" |
| } |
| ], |
| "year": 2013, |
| "venue": "ICML 2013 Workshop : Challenges in Representation Learning", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Dong-Hyun Lee. 2013. Pseudo-label : The simple and efficient semi-supervised learning method for deep neural networks. ICML 2013 Workshop : Chal- lenges in Representation Learning.", |
| "links": null |
| }, |
| "BIBREF13": { |
| "ref_id": "b13", |
| "title": "How multilingual is multilingual BERT?", |
| "authors": [ |
| { |
| "first": "Telmo", |
| "middle": [], |
| "last": "Pires", |
| "suffix": "" |
| }, |
| { |
| "first": "Eva", |
| "middle": [], |
| "last": "Schlinger", |
| "suffix": "" |
| }, |
| { |
| "first": "Dan", |
| "middle": [], |
| "last": "Garrette", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "Proceedings of the 57th Annual Meeting of the Association for Computational Linguistics", |
| "volume": "", |
| "issue": "", |
| "pages": "4996--5001", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/P19-1493" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Telmo Pires, Eva Schlinger, and Dan Garrette. 2019. How multilingual is multilingual BERT? In Pro- ceedings of the 57th Annual Meeting of the Asso- ciation for Computational Linguistics, pages 4996- 5001, Florence, Italy. Association for Computa- tional Linguistics.", |
| "links": null |
| }, |
| "BIBREF14": { |
| "ref_id": "b14", |
"title": "Cross-language text classification using structural correspondence learning",
| "authors": [ |
| { |
| "first": "Peter", |
| "middle": [], |
| "last": "Prettenhofer", |
| "suffix": "" |
| }, |
| { |
| "first": "Benno", |
| "middle": [], |
| "last": "Stein", |
| "suffix": "" |
| } |
| ], |
| "year": 2010, |
| "venue": "Proceedings of the 48th Annual Meeting of the Association for Computational Linguistics", |
| "volume": "", |
| "issue": "", |
| "pages": "1118--1127", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Peter Prettenhofer and Benno Stein. 2010. Cross- language text classification using structural corre- spondence learning. In Proceedings of the 48th Annual Meeting of the Association for Computa- tional Linguistics, pages 1118-1127, Uppsala, Swe- den. Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF15": { |
| "ref_id": "b15", |
| "title": "Neural machine translation of rare words with subword units", |
| "authors": [ |
| { |
| "first": "Rico", |
| "middle": [], |
| "last": "Sennrich", |
| "suffix": "" |
| }, |
| { |
| "first": "Barry", |
| "middle": [], |
| "last": "Haddow", |
| "suffix": "" |
| }, |
| { |
| "first": "Alexandra", |
| "middle": [], |
| "last": "Birch", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "Proceedings of the 54th Annual Meeting of the Association for Computational Linguistics", |
| "volume": "1", |
| "issue": "", |
| "pages": "1715--1725", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/P16-1162" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Rico Sennrich, Barry Haddow, and Alexandra Birch. 2016. Neural machine translation of rare words with subword units. In Proceedings of the 54th An- nual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pages 1715- 1725, Berlin, Germany. Association for Computa- tional Linguistics.", |
| "links": null |
| }, |
| "BIBREF16": { |
| "ref_id": "b16", |
| "title": "Attention is all you need", |
| "authors": [ |
| { |
| "first": "Ashish", |
| "middle": [], |
| "last": "Vaswani", |
| "suffix": "" |
| }, |
| { |
| "first": "Noam", |
| "middle": [], |
| "last": "Shazeer", |
| "suffix": "" |
| }, |
| { |
| "first": "Niki", |
| "middle": [], |
| "last": "Parmar", |
| "suffix": "" |
| }, |
| { |
| "first": "Jakob", |
| "middle": [], |
| "last": "Uszkoreit", |
| "suffix": "" |
| }, |
| { |
| "first": "Llion", |
| "middle": [], |
| "last": "Jones", |
| "suffix": "" |
| }, |
| { |
| "first": "Aidan", |
| "middle": [ |
| "N" |
| ], |
| "last": "Gomez", |
| "suffix": "" |
| }, |
| { |
| "first": "\u0141ukasz", |
| "middle": [], |
| "last": "Kaiser", |
| "suffix": "" |
| }, |
| { |
| "first": "Illia", |
| "middle": [], |
| "last": "Polosukhin", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "Advances in Neural Information Processing Systems", |
| "volume": "30", |
| "issue": "", |
| "pages": "5998--6008", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N Gomez, \u0141ukasz Kaiser, and Illia Polosukhin. 2017. Attention is all you need. In Advances in Neural Information Pro- cessing Systems 30, pages 5998-6008. Curran Asso- ciates, Inc.", |
| "links": null |
| }, |
| "BIBREF17": { |
| "ref_id": "b17", |
| "title": "Beto, bentz, becas: The surprising cross-lingual effectiveness of BERT", |
| "authors": [ |
| { |
| "first": "Shijie", |
| "middle": [], |
| "last": "Wu", |
| "suffix": "" |
| }, |
| { |
| "first": "Mark", |
| "middle": [], |
| "last": "Dredze", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "Proceedings of the 2019 Conference on Empirical Methods in Natural Language Processing and the 9th International Joint Conference on Natural Language Processing (EMNLP-IJCNLP)", |
| "volume": "", |
| "issue": "", |
| "pages": "833--844", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/D19-1077" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Shijie Wu and Mark Dredze. 2019. Beto, bentz, be- cas: The surprising cross-lingual effectiveness of BERT. In Proceedings of the 2019 Conference on Empirical Methods in Natural Language Processing and the 9th International Joint Conference on Natu- ral Language Processing (EMNLP-IJCNLP), pages 833-844, Hong Kong, China. Association for Com- putational Linguistics.", |
| "links": null |
| }, |
| "BIBREF18": { |
| "ref_id": "b18", |
| "title": "Cross-lingual distillation for text classification", |
| "authors": [ |
| { |
| "first": "Ruochen", |
| "middle": [], |
| "last": "Xu", |
| "suffix": "" |
| }, |
| { |
| "first": "Yiming", |
| "middle": [], |
| "last": "Yang", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "Proceedings of the 55th Annual Meeting of the Association for Computational Linguistics", |
| "volume": "1", |
| "issue": "", |
| "pages": "1415--1425", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/P17-1130" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Ruochen Xu and Yiming Yang. 2017. Cross-lingual distillation for text classification. In Proceedings of the 55th Annual Meeting of the Association for Com- putational Linguistics (Volume 1: Long Papers), pages 1415-1425, Vancouver, Canada. Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF19": { |
| "ref_id": "b19", |
| "title": "Multilingual seq2seq training with similarity loss for cross-lingual document classification", |
| "authors": [ |
| { |
| "first": "Katherine", |
| "middle": [], |
| "last": "Yu", |
| "suffix": "" |
| }, |
| { |
| "first": "Haoran", |
| "middle": [], |
| "last": "Li", |
| "suffix": "" |
| }, |
| { |
| "first": "Barlas", |
| "middle": [], |
| "last": "Oguz", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "Proceedings of The Third Workshop on Representation Learning for NLP", |
| "volume": "", |
| "issue": "", |
| "pages": "175--179", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Katherine Yu, Haoran Li, and Barlas Oguz. 2018. Multilingual seq2seq training with similarity loss for cross-lingual document classification. In Pro- ceedings of The Third Workshop on Representation Learning for NLP, pages 175-179.", |
| "links": null |
| } |
| }, |
| "ref_entries": { |
| "FIGREF0": { |
| "num": null, |
| "uris": null, |
| "text": "Illustration of multilingual LM fine-tuning. (a) The original multilingual LM fine-tuning procedure for cross-lingual classification. (b) The fine-tuning procedure of our proposed MONOX via knowledge distillation (MONOX-KD). Notice that MONOX does not use any target language data during fine-tuning.", |
| "type_str": "figure" |
| }, |
| "FIGREF1": { |
| "num": null, |
| "uris": null, |
"text": "Averaged accuracy scores on zero-shot XNLI with different training data sizes. (20% and 80% of the training data are regarded as the training and unlabeled sets.)",
| "type_str": "figure" |
| }, |
| "FIGREF2": { |
| "num": null, |
| "uris": null, |
| "text": "Averaged accuracy scores on the development set for zero-shot XNLI with different softmax temperatures of MONOX-KD. a great improvement.", |
| "type_str": "figure" |
| }, |
| "TABREF1": { |
| "num": null, |
| "html": null, |
| "text": "Music Books DVD Music Books DVD Music Books DVD Music avg MBERT 87.75 86.60 84.75 79.55 75.90 77.05 81.45 80.35 80.35 75.15 76.90 75.90 80.14 MBERT-ST 88.20 85.50 88.00 79.65 76.70 80.00 84.85 83.25 80.55 74.60 75.80 76.90 81.17 MONOX-PL 94.00 92.75 91.80 83.20 79.25 82.95 86.00 84.95 84.55 78.85 80.00 79.35 84.80 MONOX-KD 93.90 91.40 92.25 84.20 81.50 83.65 85.40 85.90 83.95 78.95 79.15 80.30 85.05", |
| "type_str": "table", |
| "content": "<table><tr><td>, we present the evaluation</td></tr><tr><td>results on XNLI. Unsurprisingly, both MONOX-</td></tr><tr><td>PL and MONOX-KD perform better than base-</td></tr><tr><td>line methods, showing that our method success-</td></tr></table>" |
| }, |
| "TABREF2": { |
| "num": null, |
| "html": null, |
| "text": "Evaluation results of zero-shot cross-lingual sentiment classification on the CLS dataset.", |
| "type_str": "table", |
| "content": "<table><tr><td/><td>ar</td><td>bg</td><td>de</td><td>el</td><td>en</td><td>es</td><td>fr</td><td>hi</td><td>ru</td><td>sw</td><td>th</td><td>tr</td><td>ur</td><td>vi</td><td>zh avg</td></tr><tr><td>MBERT</td><td colspan=\"15\">61.2 67.4 65.8 61.6 77.1 70.7 68.6 53.4 67.0 50.6 44.6 56.3 57.8 43.6 67.8 60.9</td></tr><tr><td>MBERT-ST</td><td colspan=\"15\">60.9 67.6 65.4 61.0 77.6 70.4 68.9 53.1 65.9 50.6 41.8 55.2 56.8 43.6 67.9 60.5</td></tr><tr><td colspan=\"16\">MONOX-PL 63.5 70.1 69.8 61.7 80.9 74.1 72.1 52.5 68.4 51.2 42.3 57.9 58.0 44.0 70.2 62.5</td></tr><tr><td colspan=\"16\">MONOX-KD 62.2 69.3 69.3 62.1 79.6 72.9 72.0 52.8 68.6 52.3 41.7 57.9 58.5 45.9 70.8 62.4</td></tr></table>" |
| } |
| } |
| } |
| } |