| { |
| "paper_id": "K19-1035", |
| "header": { |
| "generated_with": "S2ORC 1.0.0", |
| "date_generated": "2023-01-19T07:05:11.957685Z" |
| }, |
| "title": "Cross-lingual Dependency Parsing with Unlabeled Auxiliary Languages", |
| "authors": [ |
| { |
| "first": "Wasi", |
| "middle": [ |
| "Uddin" |
| ], |
| "last": "Ahmad", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "University of California", |
| "location": { |
| "settlement": "Los Angeles" |
| } |
| }, |
| "email": "wasiahmad@cs.ucla.edu" |
| }, |
| { |
| "first": "Zhisong", |
| "middle": [], |
| "last": "Zhang", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "Carnegie Mellon University", |
| "location": {} |
| }, |
| "email": "zhisongz@cs.cmu.edu" |
| }, |
| { |
| "first": "Xuezhe", |
| "middle": [], |
| "last": "Ma", |
| "suffix": "", |
| "affiliation": {}, |
| "email": "xuezhem@cs.cmu.edu" |
| }, |
| { |
| "first": "Kai-Wei", |
| "middle": [], |
| "last": "Chang", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "University of California", |
| "location": { |
| "settlement": "Los Angeles" |
| } |
| }, |
| "email": "kwchang@cs.ucla.edu" |
| }, |
| { |
| "first": "Nanyun", |
| "middle": [], |
| "last": "Peng", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "University of Southern California", |
| "location": {} |
| }, |
| "email": "npeng@isi.edu" |
| } |
| ], |
| "year": "", |
| "venue": null, |
| "identifiers": {}, |
| "abstract": "Cross-lingual transfer learning has become an important weapon to battle the unavailability of annotated resources for low-resource languages. One of the fundamental techniques to transfer across languages is learning language-agnostic representations, in the form of word embeddings or contextual encodings. In this work, we propose to leverage unannotated sentences from auxiliary languages to help learning language-agnostic representations. Specifically, we explore adversarial training for learning contextual encoders that produce invariant representations across languages to facilitate cross-lingual transfer. We conduct experiments on cross-lingual dependency parsing where we train a dependency parser on a source language and transfer it to a wide range of target languages. Experiments on 28 target languages demonstrate that adversarial training significantly improves the overall transfer performances under several different settings. We conduct a careful analysis to evaluate the language-agnostic representations resulted from adversarial training.", |
| "pdf_parse": { |
| "paper_id": "K19-1035", |
| "_pdf_hash": "", |
| "abstract": [ |
| { |
| "text": "Cross-lingual transfer learning has become an important weapon to battle the unavailability of annotated resources for low-resource languages. One of the fundamental techniques to transfer across languages is learning language-agnostic representations, in the form of word embeddings or contextual encodings. In this work, we propose to leverage unannotated sentences from auxiliary languages to help learning language-agnostic representations. Specifically, we explore adversarial training for learning contextual encoders that produce invariant representations across languages to facilitate cross-lingual transfer. We conduct experiments on cross-lingual dependency parsing where we train a dependency parser on a source language and transfer it to a wide range of target languages. Experiments on 28 target languages demonstrate that adversarial training significantly improves the overall transfer performances under several different settings. We conduct a careful analysis to evaluate the language-agnostic representations resulted from adversarial training.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Abstract", |
| "sec_num": null |
| } |
| ], |
| "body_text": [ |
| { |
| "text": "Cross-lingual transfer, where a model learned from one language is transferred to another, has become an important technique to improve the quality and coverage of natural language processing (NLP) tools for languages in the world. This technique has been widely applied in many applications, including part-of-speech (POS) tagging (Kim et al., 2017) , dependency parsing (Ma and Xia, 2014) , named entity recognition (Xie et al., 2018) , entity linking , coreference resolution , and question answering (Joty et al., 2017) . Noteworthy improvements are achieved on low resource language applications due to cross-lingual transfer learning.", |
| "cite_spans": [ |
| { |
| "start": 332, |
| "end": 350, |
| "text": "(Kim et al., 2017)", |
| "ref_id": "BIBREF21" |
| }, |
| { |
| "start": 372, |
| "end": 390, |
| "text": "(Ma and Xia, 2014)", |
| "ref_id": "BIBREF28" |
| }, |
| { |
| "start": 418, |
| "end": 436, |
| "text": "(Xie et al., 2018)", |
| "ref_id": "BIBREF49" |
| }, |
| { |
| "start": 504, |
| "end": 523, |
| "text": "(Joty et al., 2017)", |
| "ref_id": "BIBREF20" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "In this paper, we study cross-lingual transfer for dependency parsing. A dependency parser consists of (1) an encoder that transforms an input text sequence into latent representations and (2) a decoding algorithm that generates the corresponding parse tree. In cross-lingual transfer, most recent approaches assume that the inputs from different languages are aligned into the same embedding space via multilingual word embeddings or multilingual contextualized word vectors, such that the parser trained on a source language can be transferred to target languages. However, when training a parser on the source language, the encoder not only learns to embed a sentence but it also carries language-specific properties, such as word order typology. Therefore, the parser suffers when it is transferred to a language with different language properties. Motivated by this, we study how to train an encoder for generating language-agnostic representations that can be transferred across a wide variety of languages.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "We propose to utilize unlabeled sentences of one or more auxiliary languages to train an encoder that learns language-agnostic contextual representations of sentences to facilitate crosslingual transfer. To utilize the unlabeled auxiliary language corpora, we adopt adversarial training of the encoder and a classifier that predicts the language identity of an input sentence from its encoded representation produced by the encoder. The adversarial training encourages the encoder to produce language invariant representations such that the language classifier fails to predict the correct language identity. As the encoder is jointly trained with a loss for the primary task on the source language and adversarial loss on all languages, we hypothesize that it will learn to capture task-specific features as well as generic structural patterns applicable to many languages, and thus have better transferrability.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "To verify the proposed approach, we conduct experiments on neural dependency parsers trained on English (source language) and directly transfer them to 28 target languages, with or without the assistance of unlabeled data from auxiliary languages. We chose dependency parsing as the primary task since it is one of the core NLP applications and the development of Universal Dependencies (Nivre et al., 2016) provides consistent annotations across languages, allowing us to investigate transfer learning in a wide range of languages. Thorough experiments and analyses are conducted to address the following research questions:", |
| "cite_spans": [ |
| { |
| "start": 387, |
| "end": 407, |
| "text": "(Nivre et al., 2016)", |
| "ref_id": "BIBREF31" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "\u2022 Does an encoder trained with adversarial training generate language-agnostic representations? \u2022 Do language-agnostic representations improve cross-language transfer?", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "Experimental results show that the proposed approach consistently outperforms a strong baseline parser (Ahmad et al., 2019) , with a significant margin in two families of languages. In addition, we conduct experiments to consolidate our findings with different types of input representations and encoders. Our experiment code is publicly available to facilitate future research. 1", |
| "cite_spans": [ |
| { |
| "start": 103, |
| "end": 123, |
| "text": "(Ahmad et al., 2019)", |
| "ref_id": "BIBREF2" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "We train the encoder of a dependency parser in an adversarial fashion to guide it to avoid capturing language-specific information. In particular, we introduce a language identification task where a classifier predicts the language identity (id) of an input sentence from its encoded representation. Then the encoder is trained such that the classifier fails to predict the language id while the parser decoder predicts the parse tree accurately from the encoded representation. We hypothesize that such an encoder would have better cross-lingual transferability. The overall architecture of our model is illustrated in Figure 1 . In the following, we present the details of the model and training method.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 620, |
| "end": 628, |
| "text": "Figure 1", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Training Language-agnostic Encoders", |
| "sec_num": "2" |
| }, |
| { |
| "text": "Our model consists of three basic components, (1) a general encoder, (2) a decoder for parsing, and (3) a classifier for language identification. The encoder learns to generate contextualized representations for the input sentence (a word sequence) Figure 1 : An overview of our experimental model consists of three basic components: (1) Encoder, (2) (Parsing) Decoder, and (3) (Language) Classifier. We also show how parsing and adversarial losses (L p and L d ) are back propagated for parameter updates.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 249, |
| "end": 257, |
| "text": "Figure 1", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Architecture", |
| "sec_num": "2.1" |
| }, |
| { |
| "text": "which are fed to the decoder and the classifier to predict the dependency structure and the language identity (id) of that sentence.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Architecture", |
| "sec_num": "2.1" |
| }, |
| { |
| "text": "The encoder and the decoder jointly form the parsing model and we consider two alternatives 2 from (Ahmad et al., 2019): \"SelfAtt-Graph\" and \"RNN-Stack\". The \"SelfAtt-Graph\" parser consists of a modified self-attentional encoder (Shaw et al., 2018 ) and a graph-based deep bi-affine decoder (Dozat and Manning, 2017) , while the \"RNN-Stack\" parser is composed of a Recurrent Neural Network (RNN) based encoder and a stack-pointer decoder (Ma et al., 2018) .", |
| "cite_spans": [ |
| { |
| "start": 229, |
| "end": 247, |
| "text": "(Shaw et al., 2018", |
| "ref_id": "BIBREF38" |
| }, |
| { |
| "start": 291, |
| "end": 316, |
| "text": "(Dozat and Manning, 2017)", |
| "ref_id": "BIBREF10" |
| }, |
| { |
| "start": 438, |
| "end": 455, |
| "text": "(Ma et al., 2018)", |
| "ref_id": "BIBREF27" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Architecture", |
| "sec_num": "2.1" |
| }, |
| { |
| "text": "We stack a classifier (a linear classifier or a multi-layer Perceptron (MLP)) on top of the encoder to perform the language identification task. The identification task can be framed as either a word-or sentence-level classification task. For the sentence-level classification, we apply average pooling 3 on the contextual word representations generated by the encoder to form a fixed-length representation of the input sequence, which is fed to the classifier. For the word-level classification, we perform language classification for each token individually.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Architecture", |
| "sec_num": "2.1" |
| }, |
| { |
| "text": "Algorithm 1 Training procedure. Parameters to be trained: Encoder (\u03b8 g ), Decoder (\u03b8 p ), and Classifier (\u03b8 d ) X a = Annotated source language data X b = Unlabeled auxiliary language data I = Number of warm-up iterations k = Number of learning steps for the discriminator (D) at each iteration \u03bb = Coefficient of L d \u03b1 1 , \u03b1 2 = learning rate; B = Batch size Require:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Architecture", |
| "sec_num": "2.1" |
| }, |
| { |
| "text": "1: for j = 0, \u2022 \u2022 \u2022 , I do 2:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Architecture", |
| "sec_num": "2.1" |
| }, |
| { |
| "text": "Update", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Architecture", |
| "sec_num": "2.1" |
| }, |
| { |
| "text": "\u03b8 g := \u03b8 g \u2212 \u03b1 1 \u2207 \u03b8g L p 3:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Architecture", |
| "sec_num": "2.1" |
| }, |
| { |
| "text": "Update", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Architecture", |
| "sec_num": "2.1" |
| }, |
| { |
| "text": "\u03b8 p := \u03b8 p \u2212 \u03b1 1 \u2207 \u03b8p L p 4: for j = I, \u2022 \u2022 \u2022 , num iter do 5:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Architecture", |
| "sec_num": "2.1" |
| }, |
| { |
| "text": "for k steps do 6:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Architecture", |
| "sec_num": "2.1" |
| }, |
| { |
| "text": "(x i a ) B/2 i=1 \u2190 Sample a batch from X a 7: (x i b ) B/2 i=1 \u2190 Sample a batch from X b 8: Update \u03b8 d := \u03b8 d \u2212 \u03b1 2 \u2207 \u03b8 d L d 9: Total loss L := L p \u2212 \u03bbL d", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Architecture", |
| "sec_num": "2.1" |
| }, |
| { |
| "text": "10:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Architecture", |
| "sec_num": "2.1" |
| }, |
| { |
| "text": "Update \u03b8 g := \u03b8 g \u2212 \u03b1 1 \u2207 \u03b8g L 11: Update \u03b8 p := \u03b8 p \u2212 \u03b1 1 \u2207 \u03b8p L", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Architecture", |
| "sec_num": "2.1" |
| }, |
| { |
| "text": "In this work, following the terminology in the adversarial learning literature, we interchangeably refer to the encoder as the generator, G, and to the classifier as the discriminator, D.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Architecture", |
| "sec_num": "2.1" |
| }, |
| { |
| "text": "Algorithm 1 describes the training procedure. We have two types of loss functions: L p for the parsing task and L d for the language identification task. For the former, we update the encoder and the decoder as in the regular training of a parser. For the latter, we adopt adversarial training to update the encoder and the classifier. We present the detailed training schemes in the following.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Training", |
| "sec_num": "2.2" |
| }, |
| { |
| "text": "To train the parser, we adopt both cross-entropy objectives for these two types of parsers as in (Dozat and Manning, 2017; Ma et al., 2018) . The encoder and the decoder are jointly trained to optimize the probability of the dependency trees (y) given sentences (x):", |
| "cite_spans": [ |
| { |
| "start": 97, |
| "end": 122, |
| "text": "(Dozat and Manning, 2017;", |
| "ref_id": "BIBREF10" |
| }, |
| { |
| "start": 123, |
| "end": 139, |
| "text": "Ma et al., 2018)", |
| "ref_id": "BIBREF27" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Parsing", |
| "sec_num": "2.2.1" |
| }, |
| { |
| "text": "L p = \u2212 log p(y|x).", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Parsing", |
| "sec_num": "2.2.1" |
| }, |
| { |
| "text": "The probability of a tree can be further factorized into the products of the probabilities of each token's (m) head decision (h(m)) for the graph-based parser, or the probabilities of each transition step decision (t i ) for the transition-based parser:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Parsing", |
| "sec_num": "2.2.1" |
| }, |
| { |
| "text": "Graph: L p = \u2212 m log p(h(m)|x, m), Transition: L p = \u2212 i log p(t i |x, t <i ).", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Parsing", |
| "sec_num": "2.2.1" |
| }, |
| { |
| "text": "Our objective is to train the contextual encoder in a dependency parsing model such that it encodes language specific features as little as possible, which may help cross-lingual transfer. To achieve our goal, we utilize adversarial training by employing unlabeled auxiliary language corpora.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Language Identification", |
| "sec_num": "2.2.2" |
| }, |
| { |
| "text": "Setup We adopt the basic generative adversarial network (GAN) for the adversarial training. We assume that X a and X b be the corpora of the source and auxiliary language sentences, respectively. The discriminator acts as a binary classifier and is adopted to distinguish the source and auxiliary languages. For the training of the discriminator, weights are updated according to the original classification loss:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Language Identification", |
| "sec_num": "2.2.2" |
| }, |
| { |
| "text": "L d = E x\u223cX a [log D(G(x))]+ E x\u223cX b [log (1 \u2212 D(G(x)))].", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Language Identification", |
| "sec_num": "2.2.2" |
| }, |
| { |
| "text": "For the training of dependency parsing, the generator, G collaborates with the parser but acts as an adversary with respect to the discriminator. Therefore, the generator weights (\u03b8 g ) are updated by minimizing the loss function,", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Language Identification", |
| "sec_num": "2.2.2" |
| }, |
| { |
| "text": "L = L p \u2212 \u03bbL d ,", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Language Identification", |
| "sec_num": "2.2.2" |
| }, |
| { |
| "text": "where \u03bb is used to scale the discriminator loss (L d ). In this way, the generator is guided to build language-agnostic representations in order to fool the discriminator while being helpful for the parsing task. Meanwhile, the parser can be guided to rely more on the language-agnostic features.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Language Identification", |
| "sec_num": "2.2.2" |
| }, |
| { |
| "text": "Alternatives We also consider two alternative techniques for the adversarial training: Gradient Reversal (GR) (Ganin et al., 2016) and Wasserstein GAN (WGAN) . As opposed to GAN based training, in GR setup, the discriminator acts as a multiclass classifier that predicts language identity of the input sentence, and we use multi-class cross-entropy loss. We also study Wasserstein GAN (WGAN), which is proposed by to improve the stability of GAN based learning. Its loss function is shown as follows. here, the annotations are similar to those in the GAN setting.", |
| "cite_spans": [ |
| { |
| "start": 110, |
| "end": 130, |
| "text": "(Ganin et al., 2016)", |
| "ref_id": "BIBREF12" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Language Identification", |
| "sec_num": "2.2.2" |
| }, |
| { |
| "text": "L d = E x\u223cX a [D(G(x))] \u2212 E x\u223cX b [D(G(x))], Language Families Languages Afro-Asiatic Arabic (ar), Hebrew (he) Austronesian Indonesian (id) IE.Baltic Latvian (lv) IE.Germanic Danish (da), Dutch (nl), English (en), German (de), Norwegian (no), Swedish (sv) IE.Indic Hindi (hi) IE.Latin Latin (la) IE.Romance Catalan (ca), French (fr), Italian (it), Portuguese (pt), Romanian (ro), Spanish (es) IE.Slavic Bulgarian (bg), Croatian (hr), Czech (cs), Polish (pl), Russian (ru), Slovak (sk), Slovenian (sl), Ukrainian (uk) Korean Korean (ko) Uralic Estonian (et), Finnish (fi)", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Language Identification", |
| "sec_num": "2.2.2" |
| }, |
| { |
| "text": "In this section, we discuss our experiments and analysis on cross-lingual dependency parsing transfer from a variety of perspectives and show the advantages of adversarial training.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Experiments and Analysis", |
| "sec_num": "3" |
| }, |
| { |
| "text": "Settings. In our experiments, we study singlesource parsing transfer, where a parsing model is trained on one source language and directly applied to the target languages. We conduct experiments on the Universal Dependencies (UD) Treebanks (v2.2) (Nivre et al., 2018) using 29 languages, as shown in Table 1 . We use the publicly available implementation 4 of the \"SelfAtt-Graph\" and \"RNN-Stack\" parsers. 5 Ahmad et al. 2019show that the \"SelfAtt-Graph\" parser captures less language-specific information and performs better than the 'RNN-Stack\" parser for distant target languages. Therefore, we use the \"SelfAtt-Graph\" parser in most of our experiments. Besides, the multilingual variant of BERT (mBERT) (Devlin et al., 2019) has shown to perform well in cross-lingual tasks (Wu and Dredze, 2019) and outperform the models trained on multilingual word embeddings by a large margin. Therefore, we consider conducting experiments with both multilingual word embeddings and mBERT. We use aligned multilingual word embeddings (Smith et al., 2017; Bojanowski et al., 2017) with 300 dimensionss or contextualized word representations provided by multilingual BERT 6 (Devlin et al., 2019) with 768 dimensions as the word representations. In addition, we use the Gold universal POS tags to form the input representations. 7 We freeze the word representations during training to avoid the risk of disarranging the multilingual representation alignments. We select six auxiliary languages 8 (French, Portuguese, Spanish, Russian, German, and Latin) for unsupervised language adaptation via adversarial training. We tune the scaling parameter \u03bb in the range of [0.1, 0.01, 0.001] on the source language validation set and report the test performance with the best value. For gradient reversal (GR) and GAN based adversarial objectives, we use Adam (Kingma and Ba, 2015) to optimize the discriminator parameters, and for WGAN, we use RM-SProp (Tieleman and Hinton, 2012) . 
The learning rate is set to 0.001 and 0.00005 for Adam and RM-SProp, respectively. We train the parsing models for 400 and 500 epochs with multilingual BERT and multilingual word embeddings respectively. We tune the parameter I (as shown in Algorithm 1) in the range of [50, 100, 150] .", |
| "cite_spans": [ |
| { |
| "start": 247, |
| "end": 267, |
| "text": "(Nivre et al., 2018)", |
| "ref_id": "BIBREF30" |
| }, |
| { |
| "start": 706, |
| "end": 727, |
| "text": "(Devlin et al., 2019)", |
| "ref_id": "BIBREF9" |
| }, |
| { |
| "start": 777, |
| "end": 798, |
| "text": "(Wu and Dredze, 2019)", |
| "ref_id": "BIBREF46" |
| }, |
| { |
| "start": 1024, |
| "end": 1044, |
| "text": "(Smith et al., 2017;", |
| "ref_id": "BIBREF40" |
| }, |
| { |
| "start": 1045, |
| "end": 1069, |
| "text": "Bojanowski et al., 2017)", |
| "ref_id": "BIBREF5" |
| }, |
| { |
| "start": 1162, |
| "end": 1183, |
| "text": "(Devlin et al., 2019)", |
| "ref_id": "BIBREF9" |
| }, |
| { |
| "start": 1316, |
| "end": 1317, |
| "text": "7", |
| "ref_id": null |
| }, |
| { |
| "start": 1933, |
| "end": 1960, |
| "text": "(Tieleman and Hinton, 2012)", |
| "ref_id": "BIBREF44" |
| }, |
| { |
| "start": 2233, |
| "end": 2237, |
| "text": "[50,", |
| "ref_id": null |
| }, |
| { |
| "start": 2238, |
| "end": 2242, |
| "text": "100,", |
| "ref_id": null |
| }, |
| { |
| "start": 2243, |
| "end": 2247, |
| "text": "150]", |
| "ref_id": null |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 300, |
| "end": 307, |
| "text": "Table 1", |
| "ref_id": "TABREF0" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Experiments and Analysis", |
| "sec_num": "3" |
| }, |
| { |
| "text": "Language Test. The goal of training the contextual encoder adversarially with unlabeled data from auxiliary languages is to encourage the encoder to capture more language-agnostic representations and less language-dependent features. To test whether the contextual encoders retain language information after adversarial training, we train a multi-layer Perceptron (MLP) with softmax on top of the fixed contextual encoders to perform a 7-way classification task. 9 If a contextual encoder performs better in the language test, it indicates that the encoder retains language specific information. Table 2 presents the main transfer results of the \"SelfAtt-Graph\" parser when training on only English (en, baseline), English with French (enfr), and English with Russian (en-ru). The re- Table 2 : Cross-lingual transfer performances (UAS%/LAS%, excluding punctuation) of the SelfAtt-Graph parser (Ahmad et al., 2019) on the test sets. In column 1, languages are sorted by the word-ordering distance to English. (en-fr) and (en-ru) denotes the source-auxiliary language pairs. ' \u2020' indicates that the adversarially trained model results are statistically significantly better (by permutation test, p < 0.05) than the model trained only on the source language (en). Results show that the utilization of unlabeled auxiliary language corpora improves cross-lingual transfer performance significantly. sults demonstrate that the adversarial training with the auxiliary language identification task benefits cross-lingual transfer with a small performance drop on the source language. When multi-lingual embedding is employed, the performance significantly improves, in terms of UAS of 0.48 and 0.61 over the 29 languages when French and Russian are used as the auxiliary language, respectively. 
When richer multilingual representation technique like mBERT is employed, adversarial training can still improve cross-lingual transfer performances (0.21 and 0.54 UAS over the 29 languages by using French and Russian, respectively).", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 596, |
| "end": 603, |
| "text": "Table 2", |
| "ref_id": "TABREF5" |
| }, |
| { |
| "start": 785, |
| "end": 792, |
| "text": "Table 2", |
| "ref_id": "TABREF5" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Experiments and Analysis", |
| "sec_num": "3" |
| }, |
| { |
| "text": "Next, we apply adversarial training on the \"RNN-Stack\" parser and show the results in Table 3. Similar to the \"SelfAtt-Graph\"parser, the \"RNN-Stack\" parser resulted in significant improvements in cross-lingual transfer from unsu-pervised language adaptation. We discuss our detailed experimental analysis in the following.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Results and Analysis", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "To understand the impact of different adversarial training types and objectives, we apply adversarial training on both word-and sentence-level with gradient reversal (GR), GAN, and WGAN objectives. We provide the average cross-lingual transfer performances in Table 4 for different adversarial training setups. Among the adversarial training objectives, we observe that in most cases, the GAN objective results in better performances than the GR and WGAN objectives. Our finding is in contrast to Adel et al. (2018) where GR was reported to be the better objective. To further investigate, we perform the language test on the encoders trained via these two objectives. We find that the GR-based trained encoders perform consistently better than the GAN based ones on the language identification task, showing that via GAN-based training, the encoders become more language-agnostic. In a comparison between GAN and WGAN, we notice that GANbased training consistently performs better.", |
| "cite_spans": [ |
| { |
| "start": 497, |
| "end": 515, |
| "text": "Adel et al. (2018)", |
| "ref_id": "BIBREF0" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 260, |
| "end": 267, |
| "text": "Table 4", |
| "ref_id": "TABREF4" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Impact of Adversarial Training", |
| "sec_num": "3.1.1" |
| }, |
| { |
| "text": "Comparing word-and sentence-level adversarial training, we observe that predicting language identity at the word-level is slightly more useful for the \"SelfAtt-Graph\" model, while the sentence-level adversarial training results in better performances for the \"RNN-Stack\" model. There is no clear dominant strategy.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Impact of Adversarial Training", |
| "sec_num": "3.1.1" |
| }, |
| { |
| "text": "In addition, we study the effect of using a linear classifier or a multi-layer Perceptron (MLP) as the discriminator and find that the interaction between the encoder and the linear classifier resulted in improvements. 10", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Impact of Adversarial Training", |
| "sec_num": "3.1.1" |
| }, |
| { |
| "text": "In section 3.1.1, we study the effect of learning language-agnostic representation by using auxiliary language with adversarial training. An alternative way to leverage auxiliary language corpora is by encoding language-specific information in the representation via multi-task learning. In the multi-task learning (MTL) setup, the model observes the same amount of data (both labeled and unlabeled) as the adversarially trained (AT) model. The only difference between the MTL and AT models is that in the MTL models, the contextual encoders are encouraged to capture languagedependent features while in the AT models, they are trained to encode language-agnostic features.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Adversarial v.s. Multi-task Training", |
| "sec_num": "3.1.2" |
| }, |
| { |
| "text": "The experiment results using multi-task learning in comparison with the adversarial training are presented in Table 5 . Interestingly, although the MTL objective sounds contradiction to adversarial learning, it has a positive effect on the crosslingual parsing, as the representations are learned with certain additional information from new (unlabeled) data. Using MTL, we sometimes observe improvements over the baseline parser, as indicated with the \u2020 sign, while the AT models consistently perform better than both the baseline and the MTL model (as shown in Columns 2-5 in Table 5 ). The comparisons on parsing performances do not reveal whether the contextual encoders learn to 10 This is a known issue in GAN training as the discriminator becomes too strong, it fails to provide useful signals to the generator. In our case, MLP as the discriminator predicts the language labels with higher accuracy and thus fails. Table 3 : Cross-lingual transfer results (UAS%/LAS%, excluding punctuation) of the RNN-Stack parser on the test sets. ' \u2020' indicates that the adversarially trained model results are statistically significantly better (by permutation test, p < 0.05) than the model trained only on the source language (en). encode language-agnostic or dependent features.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 110, |
| "end": 117, |
| "text": "Table 5", |
| "ref_id": null |
| }, |
| { |
| "start": 578, |
| "end": 586, |
| "text": "Table 5", |
| "ref_id": null |
| }, |
| { |
| "start": 924, |
| "end": 931, |
| "text": "Table 3", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Adversarial v.s. Multi-task Training", |
| "sec_num": "3.1.2" |
| }, |
| { |
| "text": "Therefore, we perform language test with the MTL and AT (GAN based) encoders, and the results are shown in Table 5 , Columns 6-7. The results indicate that the MTL encoders consistently perform better than the AT encoders, which verifies our hypothesis that adversarial training motivates the contextual encoders to encode languageagnostic features.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 107, |
| "end": 114, |
| "text": "Table 5", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Adversarial v.s. Multi-task Training", |
| "sec_num": "3.1.2" |
| }, |
| { |
| "text": "To analyze the effects of the auxiliary languages in cross-language transfer via adversarial training, we perform experiments by pairing up 11 the source language (English) with six different lan- Table 5 : Comparison between adversarial training (AT) and multi-task learning (MTL) of the contextual encoders.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 197, |
| "end": 204, |
| "text": "Table 5", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Impact of Auxiliary Languages", |
| "sec_num": "3.1.3" |
| }, |
| { |
| "text": "Columns 2-5 demonstrate the parsing performances (UAS%/LAS%, excluding punctuation) on the auxiliary languages and average of the 29 languages. Columns 6-7 present accuracy (%) of the language label prediction test. ' \u2020' indicates that the performance is higher than the baseline performance (shown in the 2nd column of guages (spanning Germanic, Romance, Slavic, and Latin language families) as the auxiliary language. The average cross-lingual transfer performances are presented in Table 6 and the results suggest that Russian (ru) and German (de) are better candidates for auxiliary languages. We then dive deeper into the effects of auxiliary languages trying to understand whether auxiliary languages particularly benefit target languages that are closer to them 12 or from the same family. Intuitively, we would assume when the auxiliary language has a smaller average distance to all the target languages, the cross-lingual transfer performance would be better. However, from the results in Table 6 , we do not see such a pattern. For example, Portuguese (pt) has the smallest average distance to other languages among the auxiliary languages we tested, but it is not among the better auxiliary languages.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 485, |
| "end": 492, |
| "text": "Table 6", |
| "ref_id": "TABREF6" |
| }, |
| { |
| "start": 999, |
| "end": 1006, |
| "text": "Table 6", |
| "ref_id": "TABREF6" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Impact of Auxiliary Languages", |
| "sec_num": "3.1.3" |
| }, |
| { |
| "text": "We further zoom in the cross-lingual transfer improvements for each language families as shown in Table 7 . We hypothesize that the auxiliary languages are more helpful for the target languages in the same family. The experimental results moderately correlate with our expectation. Specifically, the Germanic family benefits the most from employing German (de) as the auxiliary language; similarly Slavic family with Russian (ru) as the auxiliary language (although German as the auxiliary language brings similar improvements). The Romance family is an exception because it benefits the least from using French (fr) as the auxiliary language. This may be due to the fact that French is too close to English, thus is less suitable to be used as an auxiliary language.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 98, |
| "end": 105, |
| "text": "Table 7", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Impact of Auxiliary Languages", |
| "sec_num": "3.1.3" |
| }, |
| { |
| "text": "Unsupervised Cross-lingual Parsing. Unsupervised cross-lingual transfer for dependency parsing has been studied over the past few years (Agi\u0107 et al., 2014; Ma and Xia, 2014; Xiao and Guo, 2014; Tiedemann, 2015; Guo et al., 2015; Aufrant et al., 2015; Rasooli and Collins, 2015; Duong et al., 2015; Schlichtkrull and S\u00f8gaard, 2017; Ahmad et al., 2019; Rasooli and Collins, 2019; He et al., 2019) . Here, \"unsupervised transfer\" refers to the setting where a parsing model trained only on the source language is directly Lang (en,ru) -en (en,fr) -en (en,de) -en (en,la) -en IE.Slavic Family hr 1.24/0.90 0.43/-0.45 1.52/1.02 0.06/-0.13 sl 0.35/0.53 -0.55/-0.60 -0.04/0.14 -0.17/-0.50 uk 1.23/1.32 0.26/0.09 1.54/1. 33 Table 7 : Average cross-lingual performance difference between the SelfAtt-Graph parser trained on the source (en) and an auxiliary (x) language and the SelfAtt-Graph parser trained only on English (en) language (UAS%/LAS%, excluding punctuation). We use multilingual BERT in this set of experiments.", |
| "cite_spans": [ |
| { |
| "start": 136, |
| "end": 155, |
| "text": "(Agi\u0107 et al., 2014;", |
| "ref_id": "BIBREF1" |
| }, |
| { |
| "start": 156, |
| "end": 173, |
| "text": "Ma and Xia, 2014;", |
| "ref_id": "BIBREF28" |
| }, |
| { |
| "start": 174, |
| "end": 193, |
| "text": "Xiao and Guo, 2014;", |
| "ref_id": "BIBREF48" |
| }, |
| { |
| "start": 194, |
| "end": 210, |
| "text": "Tiedemann, 2015;", |
| "ref_id": "BIBREF43" |
| }, |
| { |
| "start": 211, |
| "end": 228, |
| "text": "Guo et al., 2015;", |
| "ref_id": "BIBREF18" |
| }, |
| { |
| "start": 229, |
| "end": 250, |
| "text": "Aufrant et al., 2015;", |
| "ref_id": "BIBREF4" |
| }, |
| { |
| "start": 251, |
| "end": 277, |
| "text": "Rasooli and Collins, 2015;", |
| "ref_id": "BIBREF32" |
| }, |
| { |
| "start": 278, |
| "end": 297, |
| "text": "Duong et al., 2015;", |
| "ref_id": "BIBREF11" |
| }, |
| { |
| "start": 298, |
| "end": 330, |
| "text": "Schlichtkrull and S\u00f8gaard, 2017;", |
| "ref_id": "BIBREF37" |
| }, |
| { |
| "start": 331, |
| "end": 350, |
| "text": "Ahmad et al., 2019;", |
| "ref_id": "BIBREF2" |
| }, |
| { |
| "start": 351, |
| "end": 377, |
| "text": "Rasooli and Collins, 2019;", |
| "ref_id": "BIBREF33" |
| }, |
| { |
| "start": 378, |
| "end": 394, |
| "text": "He et al., 2019)", |
| "ref_id": "BIBREF19" |
| }, |
| { |
| "start": 713, |
| "end": 715, |
| "text": "33", |
| "ref_id": null |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 716, |
| "end": 723, |
| "text": "Table 7", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Related Work", |
| "sec_num": "4" |
| }, |
| { |
| "text": "transferred to the target languages. In this work, we relax the setting by allowing unlabeled data from one or more auxiliary (helper) languages other than the source language. This setting has been explored in a few prior works. Cohen et al. (2011) learn a generative target language parser with unannotated target data as a linear interpolation of the source language parsers. T\u00e4ckstr\u00f6m et al. (2013) adopt unlabeled target language data and a learning method that can incorporate diverse knowledge sources through ambiguous labeling for transfer parsing. In comparison, we leverage unlabeled auxiliary language data to learn language-agnostic contextual representations to improve cross-lingual transfer.", |
| "cite_spans": [ |
| { |
| "start": 379, |
| "end": 402, |
| "text": "T\u00e4ckstr\u00f6m et al. (2013)", |
| "ref_id": "BIBREF42" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Related Work", |
| "sec_num": "4" |
| }, |
| { |
| "text": "Multilingual Representation Learning. The basis of the unsupervised cross-lingual parsing is that we can align the representations of different languages into the same space, at least at the word level. The recent development of bilingual or multilingual word embeddings provide us with such shared representations. We refer the readers to the surveys of Ruder et al. (2017) and Glava\u0161 et al. (2019) for details. The main idea is that we can train a model on top of the source language embeddings which are aligned to the same space as the target language embeddings and thus all the model parameters can be directly shared across languages. During transfer to a target language, we simply replace the source language embeddings with the target language embeddings. This idea is further extended to learn multilingual contextualized word representations, for example, multilingual BERT (Devlin et al., 2019) , have been shown very effective for many crosslingual transfer tasks (Wu and Dredze, 2019) . In this work, we show that further improvements can be achieved by adapting the contextual encoders via unlabeled auxiliary languages even when the encoders are trained on top of multilingual BERT.", |
| "cite_spans": [ |
| { |
| "start": 355, |
| "end": 374, |
| "text": "Ruder et al. (2017)", |
| "ref_id": "BIBREF35" |
| }, |
| { |
| "start": 379, |
| "end": 399, |
| "text": "Glava\u0161 et al. (2019)", |
| "ref_id": "BIBREF13" |
| }, |
| { |
| "start": 886, |
| "end": 907, |
| "text": "(Devlin et al., 2019)", |
| "ref_id": "BIBREF9" |
| }, |
| { |
| "start": 978, |
| "end": 999, |
| "text": "(Wu and Dredze, 2019)", |
| "ref_id": "BIBREF46" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Related Work", |
| "sec_num": "4" |
| }, |
| { |
| "text": "Adversarial Training. The concept of adversarial training via Generative Adversarial Networks (GANs) Szegedy et al., 2014; Goodfellow et al., 2015) was initially introduced in computer vision for image classification and achieved enormous success in improving model's robustness on input images with perturbations. Later many variants of GANs Gulrajani et al., 2017) were proposed to improve its' training stability. In NLP, adversarial training was first utilized for domain adaptation (Ganin et al., 2016) . Since then adversarial training has started to receive an increasing interest in the NLP community and applied to many NLP applications including part-of-speech (POS) tagging (Gui et al., 2017; Yasunaga et al., 2018) , dependency parsing (Sato et al., 2017) , relation extraction (Wu et al., 2017 ), text classification (Miyato et al., 2017; , dialogue generation (Li et al., 2017) .", |
| "cite_spans": [ |
| { |
| "start": 101, |
| "end": 122, |
| "text": "Szegedy et al., 2014;", |
| "ref_id": "BIBREF41" |
| }, |
| { |
| "start": 123, |
| "end": 147, |
| "text": "Goodfellow et al., 2015)", |
| "ref_id": "BIBREF15" |
| }, |
| { |
| "start": 343, |
| "end": 366, |
| "text": "Gulrajani et al., 2017)", |
| "ref_id": "BIBREF17" |
| }, |
| { |
| "start": 487, |
| "end": 507, |
| "text": "(Ganin et al., 2016)", |
| "ref_id": "BIBREF12" |
| }, |
| { |
| "start": 685, |
| "end": 703, |
| "text": "(Gui et al., 2017;", |
| "ref_id": "BIBREF16" |
| }, |
| { |
| "start": 704, |
| "end": 726, |
| "text": "Yasunaga et al., 2018)", |
| "ref_id": "BIBREF51" |
| }, |
| { |
| "start": 748, |
| "end": 767, |
| "text": "(Sato et al., 2017)", |
| "ref_id": "BIBREF36" |
| }, |
| { |
| "start": 790, |
| "end": 806, |
| "text": "(Wu et al., 2017", |
| "ref_id": "BIBREF47" |
| }, |
| { |
| "start": 830, |
| "end": 851, |
| "text": "(Miyato et al., 2017;", |
| "ref_id": "BIBREF29" |
| }, |
| { |
| "start": 874, |
| "end": 891, |
| "text": "(Li et al., 2017)", |
| "ref_id": "BIBREF25" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Related Work", |
| "sec_num": "4" |
| }, |
| { |
| "text": "In the context of cross-lingual NLP tasks, many recent works adopted adversarial training, such as in sequence tagging (Adel et al., 2018 ), text classification (Xu and Yang, 2017; , word embedding induction Lample et al., 2018) , relation classification (Zou et al., 2018) , opinion mining (Wang and Pan, 2018) , and question-question similarity reranking (Joty et al., 2017) . However, existing approaches only consider using the target language as the auxiliary language. It is unclear whether the language invariant representations learned by previously proposed methods can perform well on a wide variety of unseen languages. To the best of our knowledge, we are the first to study the effects of language-agnostic representations on a broad spectrum of languages.", |
| "cite_spans": [ |
| { |
| "start": 119, |
| "end": 137, |
| "text": "(Adel et al., 2018", |
| "ref_id": "BIBREF0" |
| }, |
| { |
| "start": 161, |
| "end": 180, |
| "text": "(Xu and Yang, 2017;", |
| "ref_id": "BIBREF50" |
| }, |
| { |
| "start": 208, |
| "end": 228, |
| "text": "Lample et al., 2018)", |
| "ref_id": "BIBREF24" |
| }, |
| { |
| "start": 255, |
| "end": 273, |
| "text": "(Zou et al., 2018)", |
| "ref_id": "BIBREF53" |
| }, |
| { |
| "start": 291, |
| "end": 311, |
| "text": "(Wang and Pan, 2018)", |
| "ref_id": "BIBREF45" |
| }, |
| { |
| "start": 357, |
| "end": 376, |
| "text": "(Joty et al., 2017)", |
| "ref_id": "BIBREF20" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Related Work", |
| "sec_num": "4" |
| }, |
| { |
| "text": "In this paper, we study learning language invariant contextual encoders for cross-lingual transfer. Specifically, we leverage unlabeled sentences from auxiliary languages and adversarial training to induce language-agnostic encoders to improve the performances of the cross-lingual dependency parsing. Experiments and analysis using English as the source language and six foreign languages as the auxiliary languages not only show improvements on cross-lingual dependency parsing, but also demonstrate that contextual encoders successfully learn not to capture language-dependent features through adversarial training. In the future, we plan to investigate the effectiveness of adversarial training for multi-source transfer to parsing and other cross-lingual NLP applications.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Conclusion", |
| "sec_num": "5" |
| }, |
| { |
| "text": "https://github.com/wasiahmad/cross lingual parsing", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| }, |
| { |
| "text": "Ahmad et al. (2019) studied order-sensitive and order-free models and their performances in cross-lingual transfer. In this work, we adopt two typical ones and study the effects of adversarial training on them.3 We also experimented with max-pooling and weighted pooling but average pooling resulted in stable performance.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| }, |
| { |
| "text": "https://github.com/uclanlp/CrossLingualDepParser 5 We adopt the same hyper-parameters, experiment settings and evaluation metrics as those in(Ahmad et al., 2019).", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| }, |
| { |
| "text": "https://github.com/huggingface/pytorch-transformers7 We concatenate the word and POS representations. In our future work, we will conduct transfer learning for both POS tagging and dependency parsing.8 We want to cover languages from different families and with varying distances from the source language (English). 9 With the source (English) and six auxiliary languages.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| }, |
| { |
| "text": "We also conduct experiments on multiple languages as the auxiliary language. For GAN and WGAN-based training, we concatenate the corpora of multiple languages and treat them as one auxiliary language. In this set of experiments, we do not observe any apparent improvements.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| }, |
| { |
| "text": "The language distances are computed based on word order characteristics as suggested in Ahmad et al. (2019).", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| } |
| ], |
| "back_matter": [ |
| { |
| "text": "We thank the anonymous reviewers for their helpful feedback. This work was supported in part by National Science Foundation Grant IIS-1760523.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Acknowledgments", |
| "sec_num": null |
| } |
| ], |
| "bib_entries": { |
| "BIBREF0": { |
| "ref_id": "b0", |
| "title": "Adversarial neural networks for cross-lingual sequence tagging", |
| "authors": [ |
| { |
| "first": "Heike", |
| "middle": [], |
| "last": "Adel", |
| "suffix": "" |
| }, |
| { |
| "first": "Anton", |
| "middle": [], |
| "last": "Bryl", |
| "suffix": "" |
| }, |
| { |
| "first": "David", |
| "middle": [], |
| "last": "Weiss", |
| "suffix": "" |
| }, |
| { |
| "first": "Aliaksei", |
| "middle": [], |
| "last": "Severyn", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXiv:1808.04736" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Heike Adel, Anton Bryl, David Weiss, and Aliak- sei Severyn. 2018. Adversarial neural networks for cross-lingual sequence tagging. arXiv preprint arXiv:1808.04736.", |
| "links": null |
| }, |
| "BIBREF1": { |
| "ref_id": "b1", |
| "title": "Cross-lingual dependency parsing of related languages with rich morphosyntactic tagsets", |
| "authors": [ |
| { |
| "first": "Zeljko", |
| "middle": [], |
| "last": "Agi\u0107", |
| "suffix": "" |
| }, |
| { |
| "first": "J\u00f6rg", |
| "middle": [], |
| "last": "Tiedemann", |
| "suffix": "" |
| }, |
| { |
| "first": "Kaja", |
| "middle": [], |
| "last": "Dobrovoljc", |
| "suffix": "" |
| }, |
| { |
| "first": "Simon", |
| "middle": [], |
| "last": "Krek", |
| "suffix": "" |
| }, |
| { |
| "first": "Danijela", |
| "middle": [], |
| "last": "Merkler", |
| "suffix": "" |
| }, |
| { |
| "first": "Sara", |
| "middle": [], |
| "last": "Mo\u017ee", |
| "suffix": "" |
| } |
| ], |
| "year": 2014, |
| "venue": "EMNLP 2014 Workshop on Language Technology for Closely Related Languages and Language Variants", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Zeljko Agi\u0107, J\u00f6rg Tiedemann, Kaja Dobrovoljc, Si- mon Krek, Danijela Merkler, and Sara Mo\u017ee. 2014. Cross-lingual dependency parsing of related lan- guages with rich morphosyntactic tagsets. In EMNLP 2014 Workshop on Language Technology for Closely Related Languages and Language Vari- ants.", |
| "links": null |
| }, |
| "BIBREF2": { |
| "ref_id": "b2", |
| "title": "On difficulties of cross-lingual transfer with order differences: A case study on dependency parsing", |
| "authors": [ |
| { |
| "first": "Zhisong", |
| "middle": [], |
| "last": "Wasi Uddin Ahmad", |
| "suffix": "" |
| }, |
| { |
| "first": "Zuezhe", |
| "middle": [], |
| "last": "Zhang", |
| "suffix": "" |
| }, |
| { |
| "first": "Eduard", |
| "middle": [], |
| "last": "Ma", |
| "suffix": "" |
| }, |
| { |
| "first": "Kai-Wei", |
| "middle": [], |
| "last": "Hovy", |
| "suffix": "" |
| }, |
| { |
| "first": "Nanyun", |
| "middle": [], |
| "last": "Chang", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Peng", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "Proceedings of the 2019 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Wasi Uddin Ahmad, Zhisong Zhang, Zuezhe Ma, Ed- uard Hovy, Kai-Wei Chang, and Nanyun Peng. 2019. On difficulties of cross-lingual transfer with order differences: A case study on dependency pars- ing. In Proceedings of the 2019 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Tech- nologies.", |
| "links": null |
| }, |
| "BIBREF3": { |
| "ref_id": "b3", |
| "title": "Wasserstein generative adversarial networks", |
| "authors": [ |
| { |
| "first": "Martin", |
| "middle": [], |
| "last": "Arjovsky", |
| "suffix": "" |
| }, |
| { |
| "first": "Soumith", |
| "middle": [], |
| "last": "Chintala", |
| "suffix": "" |
| }, |
| { |
| "first": "L\u00e9on", |
| "middle": [], |
| "last": "Bottou", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "Proceedings of the 34th International Conference on Machine Learning", |
| "volume": "", |
| "issue": "", |
| "pages": "214--223", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Martin Arjovsky, Soumith Chintala, and L\u00e9on Bottou. 2017. Wasserstein generative adversarial networks. In Proceedings of the 34th International Conference on Machine Learning, pages 214-223. PMLR.", |
| "links": null |
| }, |
| "BIBREF4": { |
| "ref_id": "b4", |
| "title": "Zero-resource dependency parsing: Boosting delexicalized cross-lingual transfer with linguistic knowledge", |
| "authors": [ |
| { |
| "first": "Lauriane", |
| "middle": [], |
| "last": "Aufrant", |
| "suffix": "" |
| }, |
| { |
| "first": "Guillaume", |
| "middle": [], |
| "last": "Wisniewski", |
| "suffix": "" |
| }, |
| { |
| "first": "Fran\u00e7ois", |
| "middle": [], |
| "last": "Yvon", |
| "suffix": "" |
| } |
| ], |
| "year": 2015, |
| "venue": "COLING 2016, the 26th International Conference on Computational Linguistics", |
| "volume": "", |
| "issue": "", |
| "pages": "119--130", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Lauriane Aufrant, Guillaume Wisniewski, and Fran\u00e7ois Yvon. 2015. Zero-resource dependency parsing: Boosting delexicalized cross-lingual transfer with linguistic knowledge. In COLING 2016, the 26th In- ternational Conference on Computational Linguis- tics, pages 119-130. The COLING 2016 Organizing Committee.", |
| "links": null |
| }, |
| "BIBREF5": { |
| "ref_id": "b5", |
| "title": "Enriching word vectors with subword information", |
| "authors": [ |
| { |
| "first": "Piotr", |
| "middle": [], |
| "last": "Bojanowski", |
| "suffix": "" |
| }, |
| { |
| "first": "Edouard", |
| "middle": [], |
| "last": "Grave", |
| "suffix": "" |
| }, |
| { |
| "first": "Armand", |
| "middle": [], |
| "last": "Joulin", |
| "suffix": "" |
| }, |
| { |
| "first": "Tomas", |
| "middle": [], |
| "last": "Mikolov", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "Transactions of the Association for Computational Linguistics", |
| "volume": "5", |
| "issue": "", |
| "pages": "135--146", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Piotr Bojanowski, Edouard Grave, Armand Joulin, and Tomas Mikolov. 2017. Enriching word vectors with subword information. Transactions of the Associa- tion for Computational Linguistics, 5:135-146.", |
| "links": null |
| }, |
| "BIBREF6": { |
| "ref_id": "b6", |
| "title": "Multinomial adversarial networks for multi-domain text classification", |
| "authors": [ |
| { |
| "first": "Xilun", |
| "middle": [], |
| "last": "Chen", |
| "suffix": "" |
| }, |
| { |
| "first": "Claire", |
| "middle": [], |
| "last": "Cardie", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "Proceedings of the 2018 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies", |
| "volume": "", |
| "issue": "", |
| "pages": "1226--1240", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Xilun Chen and Claire Cardie. 2018. Multinomial ad- versarial networks for multi-domain text classifica- tion. In Proceedings of the 2018 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Tech- nologies, pages 1226-1240.", |
| "links": null |
| }, |
| "BIBREF7": { |
| "ref_id": "b7", |
| "title": "Adversarial deep averaging networks for cross-lingual sentiment classification", |
| "authors": [ |
| { |
| "first": "Xilun", |
| "middle": [], |
| "last": "Chen", |
| "suffix": "" |
| }, |
| { |
| "first": "Yu", |
| "middle": [], |
| "last": "Sun", |
| "suffix": "" |
| }, |
| { |
| "first": "Ben", |
| "middle": [], |
| "last": "Athiwaratkun", |
| "suffix": "" |
| }, |
| { |
| "first": "Claire", |
| "middle": [], |
| "last": "Cardie", |
| "suffix": "" |
| }, |
| { |
| "first": "Kilian", |
| "middle": [], |
| "last": "Weinberger", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "Transactions of the Association for Computational Linguistics", |
| "volume": "6", |
| "issue": "", |
| "pages": "557--570", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Xilun Chen, Yu Sun, Ben Athiwaratkun, Claire Cardie, and Kilian Weinberger. 2018. Adversarial deep av- eraging networks for cross-lingual sentiment classi- fication. Transactions of the Association for Com- putational Linguistics, 6:557-570.", |
| "links": null |
| }, |
| "BIBREF8": { |
| "ref_id": "b8", |
| "title": "Unsupervised structure prediction with nonparallel multilingual guidance", |
| "authors": [ |
| { |
| "first": "B", |
| "middle": [], |
| "last": "Shay", |
| "suffix": "" |
| }, |
| { |
| "first": "Dipanjan", |
| "middle": [], |
| "last": "Cohen", |
| "suffix": "" |
| }, |
| { |
| "first": "Noah", |
| "middle": [ |
| "A" |
| ], |
| "last": "Das", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Smith", |
| "suffix": "" |
| } |
| ], |
| "year": 2011, |
| "venue": "Proceedings of the 2011 Conference on Empirical Methods in Natural Language Processing", |
| "volume": "", |
| "issue": "", |
| "pages": "50--61", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Shay B. Cohen, Dipanjan Das, and Noah A. Smith. 2011. Unsupervised structure prediction with non- parallel multilingual guidance. In Proceedings of the 2011 Conference on Empirical Methods in Natu- ral Language Processing, pages 50-61, Edinburgh, Scotland, UK. Association for Computational Lin- guistics.", |
| "links": null |
| }, |
| "BIBREF9": { |
| "ref_id": "b9", |
| "title": "BERT: Pre-training of deep bidirectional transformers for language understanding", |
| "authors": [ |
| { |
| "first": "Jacob", |
| "middle": [], |
| "last": "Devlin", |
| "suffix": "" |
| }, |
| { |
| "first": "Ming-Wei", |
| "middle": [], |
| "last": "Chang", |
| "suffix": "" |
| }, |
| { |
| "first": "Kenton", |
| "middle": [], |
| "last": "Lee", |
| "suffix": "" |
| }, |
| { |
| "first": "Kristina", |
| "middle": [], |
| "last": "Toutanova", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "Proceedings of the 2019 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies", |
| "volume": "1", |
| "issue": "", |
| "pages": "4171--4186", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Jacob Devlin, Ming-Wei Chang, Kenton Lee, and Kristina Toutanova. 2019. BERT: Pre-training of deep bidirectional transformers for language under- standing. In Proceedings of the 2019 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Tech- nologies, Volume 1 (Long and Short Papers), pages 4171-4186.", |
| "links": null |
| }, |
| "BIBREF10": { |
| "ref_id": "b10", |
| "title": "Deep biaffine attention for neural dependency parsing", |
| "authors": [ |
| { |
| "first": "Timothy", |
| "middle": [], |
| "last": "Dozat", |
| "suffix": "" |
| }, |
| { |
| "first": "D", |
| "middle": [], |
| "last": "Christopher", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Manning", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Timothy Dozat and Christopher D Manning. 2017. Deep biaffine attention for neural dependency pars- ing. Internation Conference on Learning Represen- tations.", |
| "links": null |
| }, |
| "BIBREF11": { |
| "ref_id": "b11", |
| "title": "Cross-lingual transfer for unsupervised dependency parsing without parallel data", |
| "authors": [ |
| { |
| "first": "Long", |
| "middle": [], |
| "last": "Duong", |
| "suffix": "" |
| }, |
| { |
| "first": "Trevor", |
| "middle": [], |
| "last": "Cohn", |
| "suffix": "" |
| }, |
| { |
| "first": "Steven", |
| "middle": [], |
| "last": "Bird", |
| "suffix": "" |
| }, |
| { |
| "first": "Paul", |
| "middle": [], |
| "last": "Cook", |
| "suffix": "" |
| } |
| ], |
| "year": 2015, |
| "venue": "Proceedings of the Nineteenth Conference on Computational Natural Language Learning", |
| "volume": "", |
| "issue": "", |
| "pages": "113--122", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Long Duong, Trevor Cohn, Steven Bird, and Paul Cook. 2015. Cross-lingual transfer for unsupervised dependency parsing without parallel data. In Pro- ceedings of the Nineteenth Conference on Computa- tional Natural Language Learning, pages 113-122.", |
| "links": null |
| }, |
| "BIBREF12": { |
| "ref_id": "b12", |
| "title": "Domain-adversarial training of neural networks", |
| "authors": [ |
| { |
| "first": "Yaroslav", |
| "middle": [], |
| "last": "Ganin", |
| "suffix": "" |
| }, |
| { |
| "first": "Evgeniya", |
| "middle": [], |
| "last": "Ustinova", |
| "suffix": "" |
| }, |
| { |
| "first": "Hana", |
| "middle": [], |
| "last": "Ajakan", |
| "suffix": "" |
| }, |
| { |
| "first": "Pascal", |
| "middle": [], |
| "last": "Germain", |
| "suffix": "" |
| }, |
| { |
| "first": "Hugo", |
| "middle": [], |
| "last": "Larochelle", |
| "suffix": "" |
| }, |
| { |
| "first": "Fran\u00e7ois", |
| "middle": [], |
| "last": "Laviolette", |
| "suffix": "" |
| }, |
| { |
| "first": "Mario", |
| "middle": [], |
| "last": "Marchand", |
| "suffix": "" |
| }, |
| { |
| "first": "Victor", |
| "middle": [], |
| "last": "Lempitsky", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "The Journal of Machine Learning Research", |
| "volume": "17", |
| "issue": "1", |
| "pages": "2096--2030", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Yaroslav Ganin, Evgeniya Ustinova, Hana Ajakan, Pascal Germain, Hugo Larochelle, Fran\u00e7ois Lavi- olette, Mario Marchand, and Victor Lempitsky. 2016. Domain-adversarial training of neural net- works. The Journal of Machine Learning Research, 17(1):2096-2030.", |
| "links": null |
| }, |
| "BIBREF13": { |
| "ref_id": "b13", |
| "title": "How to (properly) evaluate crosslingual word embeddings: On strong baselines, comparative analyses, and some misconceptions", |
| "authors": [ |
| { |
| "first": "Goran", |
| "middle": [], |
| "last": "Glava\u0161", |
| "suffix": "" |
| }, |
| { |
| "first": "Robert", |
| "middle": [], |
| "last": "Litschko", |
| "suffix": "" |
| }, |
| { |
| "first": "Sebastian", |
| "middle": [], |
| "last": "Ruder", |
| "suffix": "" |
| }, |
| { |
| "first": "Ivan", |
| "middle": [], |
| "last": "Vuli\u0107", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "Proceedings of the 57th Annual Meeting of the Association for Computational Linguistics", |
| "volume": "", |
| "issue": "", |
| "pages": "710--721", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Goran Glava\u0161, Robert Litschko, Sebastian Ruder, and Ivan Vuli\u0107. 2019. How to (properly) evaluate cross- lingual word embeddings: On strong baselines, comparative analyses, and some misconceptions. In Proceedings of the 57th Annual Meeting of the As- sociation for Computational Linguistics, pages 710- 721.", |
| "links": null |
| }, |
| "BIBREF14": { |
| "ref_id": "b14", |
| "title": "Generative adversarial nets", |
| "authors": [ |
| { |
| "first": "Ian", |
| "middle": [], |
| "last": "Goodfellow", |
| "suffix": "" |
| }, |
| { |
| "first": "Jean", |
| "middle": [], |
| "last": "Pouget-Abadie", |
| "suffix": "" |
| }, |
| { |
| "first": "Mehdi", |
| "middle": [], |
| "last": "Mirza", |
| "suffix": "" |
| }, |
| { |
| "first": "Bing", |
| "middle": [], |
| "last": "Xu", |
| "suffix": "" |
| }, |
| { |
| "first": "David", |
| "middle": [], |
| "last": "Warde-Farley", |
| "suffix": "" |
| }, |
| { |
| "first": "Sherjil", |
| "middle": [], |
| "last": "Ozair", |
| "suffix": "" |
| }, |
| { |
| "first": "Aaron", |
| "middle": [], |
| "last": "Courville", |
| "suffix": "" |
| }, |
| { |
| "first": "Yoshua", |
| "middle": [], |
| "last": "Bengio", |
| "suffix": "" |
| } |
| ], |
| "year": 2014, |
| "venue": "Advances in neural information processing systems", |
| "volume": "", |
| "issue": "", |
| "pages": "2672--2680", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Ian Goodfellow, Jean Pouget-Abadie, Mehdi Mirza, Bing Xu, David Warde-Farley, Sherjil Ozair, Aaron Courville, and Yoshua Bengio. 2014. Generative ad- versarial nets. In Advances in neural information processing systems, pages 2672-2680.", |
| "links": null |
| }, |
| "BIBREF15": { |
| "ref_id": "b15", |
| "title": "Explaining and harnessing adversarial examples", |
| "authors": [ |
| { |
| "first": "J", |
| "middle": [], |
| "last": "Ian", |
| "suffix": "" |
| }, |
| { |
| "first": "Jonathon", |
| "middle": [], |
| "last": "Goodfellow", |
| "suffix": "" |
| }, |
| { |
| "first": "Christian", |
| "middle": [], |
| "last": "Shlens", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Szegedy", |
| "suffix": "" |
| } |
| ], |
| "year": 2015, |
"venue": "International Conference on Learning Representations",
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Ian J Goodfellow, Jonathon Shlens, and Christian Szegedy. 2015. Explaining and harnessing adversar- ial examples. In Internation Conference on Learn- ing Representations.", |
| "links": null |
| }, |
| "BIBREF16": { |
| "ref_id": "b16", |
| "title": "Part-of-speech tagging for twitter with adversarial neural networks", |
| "authors": [ |
| { |
| "first": "Tao", |
| "middle": [], |
| "last": "Gui", |
| "suffix": "" |
| }, |
| { |
| "first": "Qi", |
| "middle": [], |
| "last": "Zhang", |
| "suffix": "" |
| }, |
| { |
| "first": "Haoran", |
| "middle": [], |
| "last": "Huang", |
| "suffix": "" |
| }, |
| { |
| "first": "Minlong", |
| "middle": [], |
| "last": "Peng", |
| "suffix": "" |
| }, |
| { |
| "first": "Xuanjing", |
| "middle": [], |
| "last": "Huang", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "Proceedings of the 2017 Conference on Empirical Methods in Natural Language Processing", |
| "volume": "", |
| "issue": "", |
| "pages": "2411--2420", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Tao Gui, Qi Zhang, Haoran Huang, Minlong Peng, and Xuanjing Huang. 2017. Part-of-speech tagging for twitter with adversarial neural networks. In Pro- ceedings of the 2017 Conference on Empirical Meth- ods in Natural Language Processing, pages 2411- 2420.", |
| "links": null |
| }, |
| "BIBREF17": { |
| "ref_id": "b17", |
| "title": "Improved training of wasserstein gans", |
| "authors": [ |
| { |
| "first": "Ishaan", |
| "middle": [], |
| "last": "Gulrajani", |
| "suffix": "" |
| }, |
| { |
| "first": "Faruk", |
| "middle": [], |
| "last": "Ahmed", |
| "suffix": "" |
| }, |
| { |
| "first": "Martin", |
| "middle": [], |
| "last": "Arjovsky", |
| "suffix": "" |
| }, |
| { |
| "first": "Vincent", |
| "middle": [], |
| "last": "Dumoulin", |
| "suffix": "" |
| }, |
| { |
| "first": "Aaron C", |
| "middle": [], |
| "last": "Courville", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "Advances in Neural Information Processing Systems", |
| "volume": "", |
| "issue": "", |
| "pages": "5767--5777", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Ishaan Gulrajani, Faruk Ahmed, Martin Arjovsky, Vin- cent Dumoulin, and Aaron C Courville. 2017. Im- proved training of wasserstein gans. In Advances in Neural Information Processing Systems, pages 5767-5777.", |
| "links": null |
| }, |
| "BIBREF18": { |
| "ref_id": "b18", |
| "title": "Cross-lingual dependency parsing based on distributed representations", |
| "authors": [ |
| { |
| "first": "Jiang", |
| "middle": [], |
| "last": "Guo", |
| "suffix": "" |
| }, |
| { |
| "first": "Wanxiang", |
| "middle": [], |
| "last": "Che", |
| "suffix": "" |
| }, |
| { |
| "first": "David", |
| "middle": [], |
| "last": "Yarowsky", |
| "suffix": "" |
| }, |
| { |
| "first": "Haifeng", |
| "middle": [], |
| "last": "Wang", |
| "suffix": "" |
| }, |
| { |
| "first": "Ting", |
| "middle": [], |
| "last": "Liu", |
| "suffix": "" |
| } |
| ], |
| "year": 2015, |
| "venue": "Proceedings of the 53rd Annual Meeting of the Association for Computational Linguistics and the 7th International Joint Conference on Natural Language Processing", |
| "volume": "1", |
| "issue": "", |
| "pages": "1234--1244", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Jiang Guo, Wanxiang Che, David Yarowsky, Haifeng Wang, and Ting Liu. 2015. Cross-lingual depen- dency parsing based on distributed representations. In Proceedings of the 53rd Annual Meeting of the Association for Computational Linguistics and the 7th International Joint Conference on Natural Lan- guage Processing (Volume 1: Long Papers), vol- ume 1, pages 1234-1244.", |
| "links": null |
| }, |
| "BIBREF19": { |
| "ref_id": "b19", |
| "title": "Cross-lingual syntactic transfer through unsupervised adaptation of invertible projections", |
| "authors": [ |
| { |
| "first": "Junxian", |
| "middle": [], |
| "last": "He", |
| "suffix": "" |
| }, |
| { |
| "first": "Zhisong", |
| "middle": [], |
| "last": "Zhang", |
| "suffix": "" |
| }, |
| { |
| "first": "Taylor", |
| "middle": [], |
"last": "Berg-Kirkpatrick",
| "suffix": "" |
| }, |
| { |
| "first": "Graham", |
| "middle": [], |
| "last": "Neubig", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "Proceedings of the 57th Annual Meeting of the Association for Computational Linguistics", |
| "volume": "", |
| "issue": "", |
| "pages": "3211--3223", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Junxian He, Zhisong Zhang, Taylor Berg-Kiripatrick, and Graham Neubig. 2019. Cross-lingual syntactic transfer through unsupervised adaptation of invert- ible projections. In Proceedings of the 57th Annual Meeting of the Association for Computational Lin- guistics, pages 3211-3223.", |
| "links": null |
| }, |
| "BIBREF20": { |
| "ref_id": "b20", |
| "title": "Cross-language learning with adversarial neural networks", |
| "authors": [ |
| { |
| "first": "Shafiq", |
| "middle": [], |
| "last": "Joty", |
| "suffix": "" |
| }, |
| { |
| "first": "Preslav", |
| "middle": [], |
| "last": "Nakov", |
| "suffix": "" |
| }, |
| { |
| "first": "Llu\u00eds", |
| "middle": [], |
| "last": "M\u00e0rquez", |
| "suffix": "" |
| }, |
| { |
| "first": "Israa", |
| "middle": [], |
| "last": "Jaradat", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "Proceedings of the 21st Conference on Computational Natural Language Learning", |
| "volume": "", |
| "issue": "", |
| "pages": "226--237", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Shafiq Joty, Preslav Nakov, Llu\u00eds M\u00e0rquez, and Israa Jaradat. 2017. Cross-language learning with ad- versarial neural networks. In Proceedings of the 21st Conference on Computational Natural Lan- guage Learning (CoNLL 2017), pages 226-237.", |
| "links": null |
| }, |
| "BIBREF21": { |
| "ref_id": "b21", |
| "title": "Cross-lingual transfer learning for pos tagging without cross-lingual resources", |
| "authors": [ |
| { |
| "first": "Joo-Kyung", |
| "middle": [], |
| "last": "Kim", |
| "suffix": "" |
| }, |
| { |
| "first": "Young-Bum", |
| "middle": [], |
| "last": "Kim", |
| "suffix": "" |
| }, |
| { |
| "first": "Ruhi", |
| "middle": [], |
| "last": "Sarikaya", |
| "suffix": "" |
| }, |
| { |
| "first": "Eric", |
| "middle": [], |
| "last": "Fosler-Lussier", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "Proceedings of the 2017 Conference on Empirical Methods in Natural Language Processing", |
| "volume": "", |
| "issue": "", |
| "pages": "2832--2838", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Joo-Kyung Kim, Young-Bum Kim, Ruhi Sarikaya, and Eric Fosler-Lussier. 2017. Cross-lingual transfer learning for pos tagging without cross-lingual re- sources. In Proceedings of the 2017 Conference on Empirical Methods in Natural Language Process- ing, pages 2832-2838.", |
| "links": null |
| }, |
| "BIBREF22": { |
| "ref_id": "b22", |
| "title": "Adam: A method for stochastic optimization. International Conference on Learning Representations", |
| "authors": [ |
| { |
| "first": "P", |
| "middle": [], |
| "last": "Diederik", |
| "suffix": "" |
| }, |
| { |
| "first": "Jimmy", |
| "middle": [], |
| "last": "Kingma", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Ba", |
| "suffix": "" |
| } |
| ], |
| "year": 2015, |
"venue": "International Conference on Learning Representations",
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Diederik P Kingma and Jimmy Ba. 2015. Adam: A method for stochastic optimization. International Conference on Learning Representations.", |
| "links": null |
| }, |
| "BIBREF23": { |
| "ref_id": "b23", |
| "title": "Neural cross-lingual coreference resolution and its application to entity linking", |
| "authors": [ |
| { |
| "first": "Gourab", |
| "middle": [], |
| "last": "Kundu", |
| "suffix": "" |
| }, |
| { |
| "first": "Avi", |
| "middle": [], |
| "last": "Sil", |
| "suffix": "" |
| }, |
| { |
| "first": "Radu", |
| "middle": [], |
| "last": "Florian", |
| "suffix": "" |
| }, |
| { |
| "first": "Wael", |
| "middle": [], |
| "last": "Hamza", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "Proceedings of the 56th Annual Meeting of the Association for Computational Linguistics", |
| "volume": "2", |
| "issue": "", |
| "pages": "395--400", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Gourab Kundu, Avi Sil, Radu Florian, and Wael Hamza. 2018. Neural cross-lingual coreference res- olution and its application to entity linking. In Pro- ceedings of the 56th Annual Meeting of the Associa- tion for Computational Linguistics (Volume 2: Short Papers), pages 395-400. Association for Computa- tional Linguistics.", |
| "links": null |
| }, |
| "BIBREF24": { |
| "ref_id": "b24", |
| "title": "Word translation without parallel data", |
| "authors": [ |
| { |
| "first": "Guillaume", |
| "middle": [], |
| "last": "Lample", |
| "suffix": "" |
| }, |
| { |
| "first": "Alexis", |
| "middle": [], |
| "last": "Conneau", |
| "suffix": "" |
| }, |
| { |
| "first": "Ludovic", |
| "middle": [], |
| "last": "Denoyer", |
| "suffix": "" |
| }, |
| { |
| "first": "Herv\u00e9", |
| "middle": [], |
| "last": "J\u00e9gou", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
"venue": "International Conference on Learning Representations",
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Guillaume Lample, Alexis Conneau, Ludovic Denoyer, Herv\u00e9 J\u00e9gou, et al. 2018. Word translation without parallel data. In Internation Conference on Learning Representations.", |
| "links": null |
| }, |
| "BIBREF25": { |
| "ref_id": "b25", |
| "title": "Adversarial learning for neural dialogue generation", |
| "authors": [ |
| { |
| "first": "Jiwei", |
| "middle": [], |
| "last": "Li", |
| "suffix": "" |
| }, |
| { |
| "first": "Will", |
| "middle": [], |
| "last": "Monroe", |
| "suffix": "" |
| }, |
| { |
| "first": "Tianlin", |
| "middle": [], |
| "last": "Shi", |
| "suffix": "" |
| }, |
| { |
| "first": "S\u00e9bastien", |
| "middle": [], |
| "last": "Jean", |
| "suffix": "" |
| }, |
| { |
| "first": "Alan", |
| "middle": [], |
| "last": "Ritter", |
| "suffix": "" |
| }, |
| { |
| "first": "Dan", |
| "middle": [], |
| "last": "Jurafsky", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "Proceedings of the 2017 Conference on Empirical Methods in Natural Language Processing", |
| "volume": "", |
| "issue": "", |
| "pages": "2157--2169", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/D17-1230" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Jiwei Li, Will Monroe, Tianlin Shi, S\u00e9bastien Jean, Alan Ritter, and Dan Jurafsky. 2017. Adversarial learning for neural dialogue generation. In Proceed- ings of the 2017 Conference on Empirical Methods in Natural Language Processing, pages 2157-2169, Copenhagen, Denmark. Association for Computa- tional Linguistics.", |
| "links": null |
| }, |
| "BIBREF26": { |
| "ref_id": "b26", |
| "title": "Adversarial multi-task learning for text classification", |
| "authors": [ |
| { |
| "first": "Pengfei", |
| "middle": [], |
| "last": "Liu", |
| "suffix": "" |
| }, |
| { |
| "first": "Xipeng", |
| "middle": [], |
| "last": "Qiu", |
| "suffix": "" |
| }, |
| { |
| "first": "Xuanjing", |
| "middle": [], |
| "last": "Huang", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "Proceedings of the 55th Annual Meeting of the Association for Computational Linguistics", |
| "volume": "1", |
| "issue": "", |
| "pages": "1--10", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/P17-1001" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Pengfei Liu, Xipeng Qiu, and Xuanjing Huang. 2017. Adversarial multi-task learning for text classifica- tion. In Proceedings of the 55th Annual Meet- ing of the Association for Computational Linguistics (Volume 1: Long Papers), pages 1-10, Vancouver, Canada. Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF27": { |
| "ref_id": "b27", |
"title": "Stack-pointer networks for dependency parsing",
| "authors": [ |
| { |
| "first": "Xuezhe", |
| "middle": [], |
| "last": "Ma", |
| "suffix": "" |
| }, |
| { |
| "first": "Zecong", |
| "middle": [], |
| "last": "Hu", |
| "suffix": "" |
| }, |
| { |
| "first": "Jingzhou", |
| "middle": [], |
| "last": "Liu", |
| "suffix": "" |
| }, |
| { |
| "first": "Nanyun", |
| "middle": [], |
| "last": "Peng", |
| "suffix": "" |
| }, |
| { |
| "first": "Graham", |
| "middle": [], |
| "last": "Neubig", |
| "suffix": "" |
| }, |
| { |
| "first": "Eduard", |
| "middle": [], |
| "last": "Hovy", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "Proceedings of the 56th Annual Meeting of the Association for Computational Linguistics", |
| "volume": "1", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Xuezhe Ma, Zecong Hu, Jingzhou Liu, Nanyun Peng, Graham Neubig, and Eduard Hovy. 2018. Stack- pointer networks for dependency parsing. In Pro- ceedings of the 56th Annual Meeting of the Associa- tion for Computational Linguistics (Volume 1: Long Papers).", |
| "links": null |
| }, |
| "BIBREF28": { |
| "ref_id": "b28", |
| "title": "Unsupervised dependency parsing with transferring distribution via parallel guidance and entropy regularization", |
| "authors": [ |
| { |
| "first": "Xuezhe", |
| "middle": [], |
| "last": "Ma", |
| "suffix": "" |
| }, |
| { |
| "first": "Fei", |
| "middle": [], |
| "last": "Xia", |
| "suffix": "" |
| } |
| ], |
| "year": 2014, |
| "venue": "Proceedings of the 52nd Annual Meeting of the Association for Computational Linguistics", |
| "volume": "1", |
| "issue": "", |
| "pages": "1337--1348", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Xuezhe Ma and Fei Xia. 2014. Unsupervised depen- dency parsing with transferring distribution via par- allel guidance and entropy regularization. In Pro- ceedings of the 52nd Annual Meeting of the Associa- tion for Computational Linguistics (Volume 1: Long Papers), pages 1337-1348.", |
| "links": null |
| }, |
| "BIBREF29": { |
| "ref_id": "b29", |
"title": "Adversarial training methods for semi-supervised text classification",
| "authors": [ |
| { |
| "first": "Takeru", |
| "middle": [], |
| "last": "Miyato", |
| "suffix": "" |
| }, |
| { |
| "first": "M", |
| "middle": [], |
| "last": "Andrew", |
| "suffix": "" |
| }, |
| { |
| "first": "Ian", |
| "middle": [], |
| "last": "Dai", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Goodfellow", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
"venue": "International Conference on Learning Representations",
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Takeru Miyato, Andrew M Dai, and Ian Goodfel- low. 2017. Adversarial training methods for semi- supervised text classification. In Internation Con- ference on Learning Representations.", |
| "links": null |
| }, |
| "BIBREF30": { |
| "ref_id": "b30", |
"title": "Universal dependencies 2.2. LINDAT/CLARIN digital library at the Institute of Formal and Applied Linguistics (\u00daFAL",
| "authors": [ |
| { |
| "first": "Joakim", |
| "middle": [], |
| "last": "Nivre", |
| "suffix": "" |
| }, |
| { |
| "first": "Mitchell", |
| "middle": [], |
| "last": "Abrams", |
| "suffix": "" |
| }, |
| { |
| "first": "\u017deljko", |
| "middle": [], |
| "last": "Agi\u0107", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "Faculty of Mathematics and Physics", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Joakim Nivre, Mitchell Abrams,\u017deljko Agi\u0107, and et al. 2018. Universal dependencies 2.2. LIN- DAT/CLARIN digital library at the Institute of For- mal and Applied Linguistics (\u00daFAL), Faculty of Mathematics and Physics, Charles University.", |
| "links": null |
| }, |
| "BIBREF31": { |
| "ref_id": "b31", |
| "title": "Universal dependencies v1: A multilingual treebank collection", |
| "authors": [ |
| { |
| "first": "Joakim", |
| "middle": [], |
| "last": "Nivre", |
| "suffix": "" |
| }, |
| { |
| "first": "Marie-Catherine", |
| "middle": [], |
| "last": "De Marneffe", |
| "suffix": "" |
| }, |
| { |
| "first": "Filip", |
| "middle": [], |
| "last": "Ginter", |
| "suffix": "" |
| }, |
| { |
| "first": "Yoav", |
| "middle": [], |
| "last": "Goldberg", |
| "suffix": "" |
| }, |
| { |
| "first": "Jan", |
| "middle": [], |
| "last": "Hajic", |
| "suffix": "" |
| }, |
| { |
| "first": "D", |
| "middle": [], |
| "last": "Christopher", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Manning", |
| "suffix": "" |
| }, |
| { |
| "first": "T", |
| "middle": [], |
| "last": "Ryan", |
| "suffix": "" |
| }, |
| { |
| "first": "Slav", |
| "middle": [], |
| "last": "Mcdonald", |
| "suffix": "" |
| }, |
| { |
| "first": "Sampo", |
| "middle": [], |
| "last": "Petrov", |
| "suffix": "" |
| }, |
| { |
| "first": "Natalia", |
| "middle": [], |
| "last": "Pyysalo", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Silveira", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "Proceedings of the Tenth International Conference on Language Resources and Evaluation", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Joakim Nivre, Marie-Catherine De Marneffe, Filip Ginter, Yoav Goldberg, Jan Hajic, Christopher D Manning, Ryan T McDonald, Slav Petrov, Sampo Pyysalo, Natalia Silveira, et al. 2016. Universal de- pendencies v1: A multilingual treebank collection. In Proceedings of the Tenth International Confer- ence on Language Resources and Evaluation (LREC 2016).", |
| "links": null |
| }, |
| "BIBREF32": { |
| "ref_id": "b32", |
| "title": "Density-driven cross-lingual transfer of dependency parsers", |
| "authors": [ |
| { |
| "first": "Mohammad", |
| "middle": [], |
| "last": "Sadegh", |
| "suffix": "" |
| }, |
| { |
| "first": "Rasooli", |
| "middle": [], |
| "last": "", |
| "suffix": "" |
| }, |
| { |
| "first": "Michael", |
| "middle": [], |
| "last": "Collins", |
| "suffix": "" |
| } |
| ], |
| "year": 2015, |
| "venue": "Proceedings of the 2015 Conference on Empirical Methods in Natural Language Processing", |
| "volume": "", |
| "issue": "", |
| "pages": "328--338", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Mohammad Sadegh Rasooli and Michael Collins. 2015. Density-driven cross-lingual transfer of de- pendency parsers. In Proceedings of the 2015 Con- ference on Empirical Methods in Natural Language Processing, pages 328-338.", |
| "links": null |
| }, |
| "BIBREF33": { |
| "ref_id": "b33", |
| "title": "Low-resource syntactic transfer with unsupervised source reordering", |
| "authors": [ |
| { |
| "first": "Mohammad", |
| "middle": [], |
| "last": "Sadegh", |
| "suffix": "" |
| }, |
| { |
| "first": "Rasooli", |
| "middle": [], |
| "last": "", |
| "suffix": "" |
| }, |
| { |
| "first": "Michael", |
| "middle": [], |
| "last": "Collins", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "Proceedings of the", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Mohammad Sadegh Rasooli and Michael Collins. 2019. Low-resource syntactic transfer with unsuper- vised source reordering. Proceedings of the 2019", |
| "links": null |
| }, |
| "BIBREF34": { |
| "ref_id": "b34", |
| "title": "Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies", |
| "authors": [], |
| "year": null, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies.", |
| "links": null |
| }, |
| "BIBREF35": { |
| "ref_id": "b35", |
| "title": "A survey of cross-lingual embedding models", |
| "authors": [ |
| { |
| "first": "Sebastian", |
| "middle": [], |
| "last": "Ruder", |
| "suffix": "" |
| }, |
| { |
| "first": "Anders", |
| "middle": [], |
| "last": "S\u00f8gaard", |
| "suffix": "" |
| }, |
| { |
| "first": "Ivan", |
| "middle": [], |
| "last": "Vulic", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXiv:1706.04902" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Sebastian Ruder, Anders S\u00f8gaard, and Ivan Vulic. 2017. A survey of cross-lingual embedding models. arXiv preprint arXiv:1706.04902.", |
| "links": null |
| }, |
| "BIBREF36": { |
| "ref_id": "b36", |
"title": "Adversarial training for cross-domain universal dependency parsing",
| "authors": [ |
| { |
| "first": "Motoki", |
| "middle": [], |
| "last": "Sato", |
| "suffix": "" |
| }, |
| { |
| "first": "Hitoshi", |
| "middle": [], |
| "last": "Manabe", |
| "suffix": "" |
| }, |
| { |
| "first": "Hiroshi", |
| "middle": [], |
| "last": "Noji", |
| "suffix": "" |
| }, |
| { |
| "first": "Yuji", |
| "middle": [], |
| "last": "Matsumoto", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "Proceedings of the CoNLL 2017 Shared Task: Multilingual Parsing from Raw Text to Universal Dependencies", |
| "volume": "", |
| "issue": "", |
| "pages": "71--79", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Motoki Sato, Hitoshi Manabe, Hiroshi Noji, and Yuji Matsumoto. 2017. Adversarial training for cross- domain universal dependency parsing. In Proceed- ings of the CoNLL 2017 Shared Task: Multilingual Parsing from Raw Text to Universal Dependencies, pages 71-79.", |
| "links": null |
| }, |
| "BIBREF37": { |
| "ref_id": "b37", |
| "title": "Cross-lingual dependency parsing with late decoding for truly low-resource languages", |
| "authors": [ |
| { |
| "first": "Michael", |
| "middle": [], |
| "last": "Schlichtkrull", |
| "suffix": "" |
| }, |
| { |
| "first": "Anders", |
| "middle": [], |
| "last": "S\u00f8gaard", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "Proceedings of the 15th Conference of the European Chapter of the Association for Computational Linguistics", |
| "volume": "1", |
| "issue": "", |
| "pages": "220--229", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Michael Schlichtkrull and Anders S\u00f8gaard. 2017. Cross-lingual dependency parsing with late decod- ing for truly low-resource languages. In Proceed- ings of the 15th Conference of the European Chap- ter of the Association for Computational Linguistics: Volume 1, Long Papers, pages 220-229. Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF38": { |
| "ref_id": "b38", |
| "title": "Self-attention with relative position representations", |
| "authors": [ |
| { |
| "first": "Peter", |
| "middle": [], |
| "last": "Shaw", |
| "suffix": "" |
| }, |
| { |
| "first": "Jakob", |
| "middle": [], |
| "last": "Uszkoreit", |
| "suffix": "" |
| }, |
| { |
| "first": "Ashish", |
| "middle": [], |
| "last": "Vaswani", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "Proceedings of the 2018 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies", |
| "volume": "2", |
| "issue": "", |
| "pages": "464--468", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Peter Shaw, Jakob Uszkoreit, and Ashish Vaswani. 2018. Self-attention with relative position represen- tations. In Proceedings of the 2018 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Tech- nologies, Volume 2 (Short Papers), pages 464-468. Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF39": { |
| "ref_id": "b39", |
| "title": "Neural cross-lingual entity linking", |
| "authors": [ |
| { |
| "first": "Avirup", |
| "middle": [], |
| "last": "Sil", |
| "suffix": "" |
| }, |
| { |
| "first": "Gourab", |
| "middle": [], |
| "last": "Kundu", |
| "suffix": "" |
| }, |
| { |
| "first": "Radu", |
| "middle": [], |
| "last": "Florian", |
| "suffix": "" |
| }, |
| { |
| "first": "Wael", |
| "middle": [], |
| "last": "Hamza", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "Thirty-Second AAAI Conference on Artificial Intelligence", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Avirup Sil, Gourab Kundu, Radu Florian, and Wael Hamza. 2018. Neural cross-lingual entity linking. In Thirty-Second AAAI Conference on Artificial In- telligence.", |
| "links": null |
| }, |
| "BIBREF40": { |
| "ref_id": "b40", |
"title": "Offline bilingual word vectors, orthogonal transformations and the inverted softmax. International Conference on Learning Representations",
| "authors": [ |
| { |
| "first": "L", |
| "middle": [], |
| "last": "Samuel", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Smith", |
| "suffix": "" |
| }, |
| { |
| "first": "H", |
| "middle": [ |
| "P" |
| ], |
| "last": "David", |
| "suffix": "" |
| }, |
| { |
| "first": "Steven", |
| "middle": [], |
| "last": "Turban", |
| "suffix": "" |
| }, |
| { |
| "first": "Nils", |
| "middle": [ |
| "Y" |
| ], |
| "last": "Hamblin", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Hammerla", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
"venue": "International Conference on Learning Representations",
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Samuel L Smith, David HP Turban, Steven Hamblin, and Nils Y Hammerla. 2017. Offline bilingual word vectors, orthogonal transformations and the inverted softmax. Internation Conference on Learning Rep- resentations.", |
| "links": null |
| }, |
| "BIBREF41": { |
| "ref_id": "b41", |
| "title": "Intriguing properties of neural networks", |
| "authors": [ |
| { |
| "first": "Christian", |
| "middle": [], |
| "last": "Szegedy", |
| "suffix": "" |
| }, |
| { |
| "first": "Wojciech", |
| "middle": [], |
| "last": "Zaremba", |
| "suffix": "" |
| }, |
| { |
| "first": "Ilya", |
| "middle": [], |
| "last": "Sutskever", |
| "suffix": "" |
| }, |
| { |
| "first": "Joan", |
| "middle": [], |
| "last": "Bruna", |
| "suffix": "" |
| }, |
| { |
| "first": "Dumitru", |
| "middle": [], |
| "last": "Erhan", |
| "suffix": "" |
| }, |
| { |
| "first": "Ian", |
| "middle": [], |
| "last": "Goodfellow", |
| "suffix": "" |
| }, |
| { |
| "first": "Rob", |
| "middle": [], |
| "last": "Fergus", |
| "suffix": "" |
| } |
| ], |
| "year": 2014, |
"venue": "International Conference on Learning Representations",
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Christian Szegedy, Wojciech Zaremba, Ilya Sutskever, Joan Bruna, Dumitru Erhan, Ian Goodfellow, and Rob Fergus. 2014. Intriguing properties of neural networks. In Internation Conference on Learning Representations.", |
| "links": null |
| }, |
| "BIBREF42": { |
| "ref_id": "b42", |
| "title": "Target language adaptation of discriminative transfer parsers", |
| "authors": [ |
| { |
| "first": "Oscar", |
| "middle": [], |
| "last": "T\u00e4ckstr\u00f6m", |
| "suffix": "" |
| }, |
| { |
| "first": "Ryan", |
| "middle": [], |
| "last": "Mcdonald", |
| "suffix": "" |
| }, |
| { |
| "first": "Joakim", |
| "middle": [], |
| "last": "Nivre", |
| "suffix": "" |
| } |
| ], |
| "year": 2013, |
| "venue": "Proceedings of the 2013 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies", |
| "volume": "", |
| "issue": "", |
| "pages": "1061--1071", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Oscar T\u00e4ckstr\u00f6m, Ryan McDonald, and Joakim Nivre. 2013. Target language adaptation of discriminative transfer parsers. In Proceedings of the 2013 Con- ference of the North American Chapter of the Asso- ciation for Computational Linguistics: Human Lan- guage Technologies, pages 1061-1071. Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF43": { |
| "ref_id": "b43", |
| "title": "Cross-lingual dependency parsing with universal dependencies and predicted pos labels", |
| "authors": [ |
| { |
| "first": "J\u00f6rg", |
| "middle": [], |
| "last": "Tiedemann", |
| "suffix": "" |
| } |
| ], |
| "year": 2015, |
| "venue": "Proceedings of the Third International Conference on Dependency Linguistics", |
| "volume": "", |
| "issue": "", |
| "pages": "340--349", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "J\u00f6rg Tiedemann. 2015. Cross-lingual dependency parsing with universal dependencies and predicted pos labels. In Proceedings of the Third International Conference on Dependency Linguistics (Depling 2015), pages 340-349.", |
| "links": null |
| }, |
| "BIBREF44": { |
| "ref_id": "b44", |
| "title": "Lecture 6.5-rmsprop: Divide the gradient by a running average of its recent magnitude. COURSERA: Neural networks for machine learning", |
| "authors": [ |
| { |
| "first": "Tijmen", |
| "middle": [], |
| "last": "Tieleman", |
| "suffix": "" |
| }, |
| { |
| "first": "Geoffrey", |
| "middle": [], |
| "last": "Hinton", |
| "suffix": "" |
| } |
| ], |
| "year": 2012, |
| "venue": "", |
| "volume": "4", |
| "issue": "", |
| "pages": "26--31", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Tijmen Tieleman and Geoffrey Hinton. 2012. Lecture 6.5-rmsprop: Divide the gradient by a running av- erage of its recent magnitude. COURSERA: Neural networks for machine learning, 4(2):26-31.", |
| "links": null |
| }, |
| "BIBREF45": { |
| "ref_id": "b45", |
| "title": "Transitionbased adversarial network for cross-lingual aspect extraction", |
| "authors": [ |
| { |
| "first": "Wenya", |
| "middle": [], |
| "last": "Wang", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Sinno Jialin Pan", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "Proceedings of the Twenty-Seventh International Joint Conference on Artificial Intelligence, IJCAI-18", |
| "volume": "", |
| "issue": "", |
| "pages": "4475--4481", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Wenya Wang and Sinno Jialin Pan. 2018. Transition- based adversarial network for cross-lingual aspect extraction. In Proceedings of the Twenty-Seventh International Joint Conference on Artificial Intel- ligence, IJCAI-18, pages 4475-4481. International Joint Conferences on Artificial Intelligence Organi- zation.", |
| "links": null |
| }, |
| "BIBREF46": { |
| "ref_id": "b46", |
| "title": "Beto, bentz, becas: The surprising cross-lingual effectiveness of bert", |
| "authors": [ |
| { |
| "first": "Shijie", |
| "middle": [], |
| "last": "Wu", |
| "suffix": "" |
| }, |
| { |
| "first": "Mark", |
| "middle": [], |
| "last": "Dredze", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "Proceedings of the 2019 Conference on Empirical Methods in Natural Language Processing (EMNLP)", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Shijie Wu and Mark Dredze. 2019. Beto, bentz, be- cas: The surprising cross-lingual effectiveness of bert. Proceedings of the 2019 Conference on Em- pirical Methods in Natural Language Processing (EMNLP).", |
| "links": null |
| }, |
| "BIBREF47": { |
| "ref_id": "b47", |
| "title": "Adversarial training for relation extraction", |
| "authors": [ |
| { |
| "first": "Yi", |
| "middle": [], |
| "last": "Wu", |
| "suffix": "" |
| }, |
| { |
| "first": "David", |
| "middle": [], |
| "last": "Bamman", |
| "suffix": "" |
| }, |
| { |
| "first": "Stuart", |
| "middle": [], |
| "last": "Russell", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "Proceedings of the 2017 Conference on Empirical Methods in Natural Language Processing", |
| "volume": "", |
| "issue": "", |
| "pages": "1778--1783", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Yi Wu, David Bamman, and Stuart Russell. 2017. Ad- versarial training for relation extraction. In Proceed- ings of the 2017 Conference on Empirical Methods in Natural Language Processing, pages 1778-1783.", |
| "links": null |
| }, |
| "BIBREF48": { |
| "ref_id": "b48", |
| "title": "Distributed word representation learning for cross-lingual dependency parsing", |
| "authors": [ |
| { |
| "first": "Min", |
| "middle": [], |
| "last": "Xiao", |
| "suffix": "" |
| }, |
| { |
| "first": "Yuhong", |
| "middle": [], |
| "last": "Guo", |
| "suffix": "" |
| } |
| ], |
| "year": 2014, |
| "venue": "Proceedings of the Eighteenth Conference on Computational Natural Language Learning", |
| "volume": "", |
| "issue": "", |
| "pages": "119--129", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Min Xiao and Yuhong Guo. 2014. Distributed word representation learning for cross-lingual dependency parsing. In Proceedings of the Eighteenth Confer- ence on Computational Natural Language Learning, pages 119-129.", |
| "links": null |
| }, |
| "BIBREF49": { |
| "ref_id": "b49", |
| "title": "Neural crosslingual named entity recognition with minimal resources", |
| "authors": [ |
| { |
| "first": "Jiateng", |
| "middle": [], |
| "last": "Xie", |
| "suffix": "" |
| }, |
| { |
| "first": "Zhilin", |
| "middle": [], |
| "last": "Yang", |
| "suffix": "" |
| }, |
| { |
| "first": "Graham", |
| "middle": [], |
| "last": "Neubig", |
| "suffix": "" |
| }, |
| { |
| "first": "Noah", |
| "middle": [ |
| "A" |
| ], |
| "last": "Smith", |
| "suffix": "" |
| }, |
| { |
| "first": "Jaime", |
| "middle": [], |
| "last": "Carbonell", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "Proceedings of the 2018 Conference on Empirical Methods in Natural Language Processing", |
| "volume": "", |
| "issue": "", |
| "pages": "369--379", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Jiateng Xie, Zhilin Yang, Graham Neubig, Noah A. Smith, and Jaime Carbonell. 2018. Neural cross- lingual named entity recognition with minimal re- sources. In Proceedings of the 2018 Conference on Empirical Methods in Natural Language Process- ing, pages 369-379. Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF50": { |
| "ref_id": "b50", |
| "title": "Cross-lingual distillation for text classification", |
| "authors": [ |
| { |
| "first": "Ruochen", |
| "middle": [], |
| "last": "Xu", |
| "suffix": "" |
| }, |
| { |
| "first": "Yiming", |
| "middle": [], |
| "last": "Yang", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "Proceedings of the 55th Annual Meeting of the Association for Computational Linguistics", |
| "volume": "1", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Ruochen Xu and Yiming Yang. 2017. Cross-lingual distillation for text classification. In Proceedings of the 55th Annual Meeting of the Association for Com- putational Linguistics (Volume 1: Long Papers).", |
| "links": null |
| }, |
| "BIBREF51": { |
| "ref_id": "b51", |
| "title": "Robust multilingual part-of-speech tagging via adversarial training", |
| "authors": [ |
| { |
| "first": "Michihiro", |
| "middle": [], |
| "last": "Yasunaga", |
| "suffix": "" |
| }, |
| { |
| "first": "Jungo", |
| "middle": [], |
| "last": "Kasai", |
| "suffix": "" |
| }, |
| { |
| "first": "Dragomir", |
| "middle": [], |
| "last": "Radev", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "Proceedings of the 2018 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies", |
| "volume": "1", |
| "issue": "", |
| "pages": "976--986", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/N18-1089" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Michihiro Yasunaga, Jungo Kasai, and Dragomir Radev. 2018. Robust multilingual part-of-speech tagging via adversarial training. In Proceedings of the 2018 Conference of the North American Chap- ter of the Association for Computational Linguistics: Human Language Technologies, Volume 1 (Long Pa- pers), pages 976-986, New Orleans, Louisiana. As- sociation for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF52": { |
| "ref_id": "b52", |
| "title": "Adversarial training for unsupervised bilingual lexicon induction", |
| "authors": [ |
| { |
| "first": "Meng", |
| "middle": [], |
| "last": "Zhang", |
| "suffix": "" |
| }, |
| { |
| "first": "Yang", |
| "middle": [], |
| "last": "Liu", |
| "suffix": "" |
| }, |
| { |
| "first": "Huanbo", |
| "middle": [], |
| "last": "Luan", |
| "suffix": "" |
| }, |
| { |
| "first": "Maosong", |
| "middle": [], |
| "last": "Sun", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "Proceedings of the 55th Annual Meeting of the Association for Computational Linguistics", |
| "volume": "1", |
| "issue": "", |
| "pages": "1959--1970", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Meng Zhang, Yang Liu, Huanbo Luan, and Maosong Sun. 2017. Adversarial training for unsupervised bilingual lexicon induction. In Proceedings of the 55th Annual Meeting of the Association for Compu- tational Linguistics (Volume 1: Long Papers), pages 1959-1970.", |
| "links": null |
| }, |
| "BIBREF53": { |
| "ref_id": "b53", |
| "title": "Adversarial feature adaptation for cross-lingual relation classification", |
| "authors": [ |
| { |
| "first": "Bowei", |
| "middle": [], |
| "last": "Zou", |
| "suffix": "" |
| }, |
| { |
| "first": "Zengzhuang", |
| "middle": [], |
| "last": "Xu", |
| "suffix": "" |
| }, |
| { |
| "first": "Yu", |
| "middle": [], |
| "last": "Hong", |
| "suffix": "" |
| }, |
| { |
| "first": "Guodong", |
| "middle": [], |
| "last": "Zhou", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "Proceedings of the 27th International Conference on Computational Linguistics", |
| "volume": "", |
| "issue": "", |
| "pages": "437--448", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Bowei Zou, Zengzhuang Xu, Yu Hong, and Guodong Zhou. 2018. Adversarial feature adaptation for cross-lingual relation classification. In Proceedings of the 27th International Conference on Computa- tional Linguistics, pages 437-448.", |
| "links": null |
| } |
| }, |
| "ref_entries": { |
| "FIGREF0": { |
| "type_str": "figure", |
| "uris": null, |
| "num": null, |
| "text": "Avg. 0.57/0.58 0.02/0.03 0.54/0.53 0.64/0.69 IE.Germanic Family en -0.42/-0.35 -0.38/-0.24 -0.35/-0.25 -0.15/-0.20 no -0.38/-0.27 -0.31/-0.39 -0.41/-0.15 -0.22/-0.24 sv -0.17/-0.01 0.03/0.24 -0.12/0.35 -0.02/0.18 da 0.00/0.33 0.04/0.15 -0.15/0.08 -0.46/-0.25 nl 0.13/0.41 0.18/-0.07 0.95/0.89 0.57/0.42 de 0.42/0.45 -0.62/-0.58 1.41/1.40 0.25/0.43 Avg. -0.07/0.09 -0.18/-0.15 0.22/0.39 0.00/0.06" |
| }, |
| "TABREF0": { |
| "num": null, |
| "html": null, |
| "type_str": "table", |
| "content": "<table/>", |
| "text": "The selected 29 languages for experiments from UD v2.2(Nivre et al., 2018)." |
| }, |
| "TABREF1": { |
| "num": null, |
| "html": null, |
| "type_str": "table", |
| "content": "<table><tr><td>Lang</td><td colspan=\"2\">Multilingual Word Embeddings (en) (en-fr) (en-ru)</td><td>(en)</td><td colspan=\"2\">Multilingual BERT (en-fr)</td><td>(en-ru)</td></tr><tr><td>en</td><td colspan=\"4\">90.2327/53.78</td><td>75.62/54.29</td></tr><tr><td>fi</td><td>66.11/48.73 65.84/48.61</td><td>66.28/48.82</td><td colspan=\"2\">71.59/53.81 71.35/53.63</td><td>71.74/53.79</td></tr><tr><td>et</td><td>65.01/44</td><td/><td/><td/></tr></table>", |
| "text": "/88.23 90.01/88.08 89.93/87.93 93.19/91.21 92.81/90.97 92.77/90.86 no 80.82/72.94 80.60/72.83 80.98/73.10 85.81/79.03 85.50/78.64 85.43/78.76 sv 80.33/72.54 79.90/72.16 80.43/72.68 85.61/78.34 85.64/78.58 85.44/78.33 fr 77.71/72.35 78.49 \u2020 /73.30 \u2020 78.31/73.29 85.22/80.78 84.76/80.26 85.91 \u2020 /81.63 \u2020 pt 76.41/67.35 76.88 \u2020 /67.74 77.09 \u2020 /67.81 82.93/73.33 82.71/73.13 83.43 \u2020 /73.88 \u2020 da 76.58/68.11 75.99/67.64 76.25/68.03 82.36/73.53 82.40/73.68 82.36/73.86 \u2020 es 73.76/65.46 74.14/65.78 74.08/65.84 80.81/72.66 81.11/72.80 81.38 \u2020 /73.29 \u2020 it 80.89/75.61 81.33 \u2020 /76.14 \u2020 80.70/75.57 87.07/82.38 86.90/82.22 87.41/82.67 hr 62.21/52.67 63.38 \u2020 /53.83 \u2020 63.11 \u2020 /53.62 \u2020 72.96/62.65 73.39 \u2020 /62.20 74.20 \u2020 /63.55 \u2020 ca 73.18/64.53 73.46 \u2020 /64.71 73.40/64.90 \u2020 80.40/71.42 80.30/71.42 80.75/71.78 pl 74.65/62.72 75.65 \u2020 /63.31 \u2020 75.93/63.60 81.51/69.25 82.33 \u2020 /69.91 \u2020 82.48 \u2020 /70.54 \u2020 uk 59.25/51.92 60.58 \u2020 /52.72 \u2020 60.81 \u2020 /52.66 \u2020 69.98/61.52 70.24/61.61 71.21 \u2020 /62.84 \u2020 sl 67.51/56.42 68.14/56.52 68.40/56.87 75.15/63.12 74.60/62.52 75.50/63.65 \u2020 nl 68.54/59.99 68.80/60.23 69.23 \u2020 /60.51 \u2020 76.76/68.35 76.94/68.28 76.89/68.76 \u2020 bg 79.09/67.61 80.01 \u2020 /68.42 79.72/68.39 86.82/75.47 87.08/75.40 87.61 \u2020 /75.94 \u2020 ru 60.91/52.03 61.42 \u2020 /52.27 \u2020 61.67 \u2020 /52.41 \u2020 71.92/62.09 72.31/62.15 72.88 \u2020 /62.94 \u2020 de 71.41/61.97 70.70/61.41 71.05/61.84 78.66/69.81 78.04/69.23 79.08 \u2020 /70.26 \u2020 he 55.70/48.08 57.33 \u2020 /49.37 \u2020 57.15 \u2020 /49.36 \u2020 64.46/55.82 64.97 \u2020 /55.63 65.30 \u2020 /55.76 cs 63.30/54.14 63.94 \u2020 /54.63 \u2020 64.37 \u2020 /55.08 \u2020 73.78/63.52 74.57 \u2020 /63.86 74.56 \u2020 /64.17 \u2020 ro 65.13/53.98 65.86/54.76 65.57/54.42 75.10/62.99 75.85 \u2020 /63.92 \u2020 76.06 \u2020 /63.78 \u2020 
sk 66.79/58.23 67.46 \u2020 /58.77 67.42 \u2020 /58.70 76.30/67.38 77.08 \u2020 /67.57 77.86 \u2020 /68.28 \u2020 id 49.85/44.09 52.05 \u2020 /45.76 \u2020 51.57/45.31 56.80/50.24 57.45 \u2020 /50.27 57.30 \u2020 /50.70 .78 65.31 \u2020 /45.12 \u2020 65.38 \u2020 /45.32 \u2020 71.55/50.98 71.73/51.27 71.25/51.16 ar 37.63/27.48 38.72 \u2020 /28.00 \u2020 38.98 \u2020 /27.89 \u2020 49.27/37.62 50.37 \u2020 /39.37 \u2020 50.95 \u2020 /39.57 \u2020 la 47.74/34.90 48.80 \u2020 /35.64 \u2020 49.17 \u2020 /35.73 \u2020 51.83/38.20 51.48/38.00 52.20/38.28 ko 34.44/16.18 33.98/15.93 34.23/16.08 38.10/20.62 38.03/20.59 38.98 \u2020 /21.54 \u2020 hi 36.34/27.43 36.72/27.40 37.37 \u2020 /28.01 \u2020 45.40/35.03 47.74 \u2020 /35.90 \u2020 46.10 \u2020 /34.74 Average 65.92/55.86 66.40 \u2020 /56.22 \u2020 66.53 \u2020 /56.32 \u2020 73.34/62.93 73.55/62.99 73.88 \u2020 /63.43 \u2020" |
| }, |
| "TABREF2": { |
| "num": null, |
| "html": null, |
| "type_str": "table", |
| "content": "<table><tr><td>Lang</td><td>(en)</td><td>(en-fr)</td><td>(en-ru)</td></tr><tr><td colspan=\"4\">en 89.65lv 70.22/48.46 71.08 \u2020 /49.10 \u2020 70.76/48.86</td></tr><tr><td>fi</td><td colspan=\"2\">65.39/47.78 65.59/48.31 \u2020</td><td>65.42/47.84</td></tr><tr><td>et</td><td colspan=\"2\">64.73/43.84 65.01/44.27</td><td>65.04/44.16</td></tr><tr><td>ar</td><td colspan=\"3\">30.98/23.83 31.91 \u2020 /24.72 \u2020 32.83 \u2020 /25.34 \u2020</td></tr><tr><td>la</td><td colspan=\"2\">45.28/33.08 44.94/32.94</td><td>45.12/33.11</td></tr><tr><td>ko</td><td colspan=\"2\">33.50/14.36 32.87/14.10</td><td>32.60/14.11</td></tr><tr><td>hi</td><td colspan=\"2\">27.63/19.16 27.66/19.22</td><td>26.72/18.96</td></tr></table>", |
| "text": "/87.43 89.88/87.66 89.67/87.56 no 80.20/72.11 80.42/72.49 80.73 \u2020 /72.65 \u2020 sv 81.02/72.95 81.14/73.44 \u2020 81.20/73.37 fr 77.42/72.27 77.45/72.72 77.78/73.10 pt 75.94/67.40 76.09/67.47 76.39 \u2020 /67.85 \u2020 da 76.87/68.06 77.43 \u2020 /68.62 \u2020 77.92 \u2020 /69.24 \u2020 es 73.92/65.95 74.32 \u2020 /66.35 \u2020 74.83 \u2020 /66.83 \u2020 it 80.09/75.36 80.98 \u2020 /76.00 \u2020 81.04 \u2020 /76.06 \u2020 hr 59.53/49.19 60.00 \u2020 /50.02 \u2020 60.16 \u2020 /50.16 \u2020 ca 73.62/64.97 73.73/65.11 74.18 \u2020 /65.59 \u2020 pl 71.48/57.43 72.48 \u2020 /59.19 \u2020 72.55 \u2020 /58.38 \u2020 uk 57.23/49.67 58.38 \u2020 /51.04 \u2020 58.57 \u2020 /50.88 \u2020 sl 65.48/53.40 66.11 \u2020 /54.21 \u2020 66.23 \u2020 /54.09 \u2020 nl 67.13/59.15 67.57/59.71 \u2020 67.76 \u2020 /59.96 \u2020 bg 77.28/65.77 77.79 \u2020 /66.66 \u2020 78.02 \u2020 /66.53 \u2020 ru 58.70/49.34 59.77 \u2020 /50.77 \u2020 59.98 \u2020 /50.51 \u2020 de 69.71/58.51 70.03/59.45 \u2020 70.05/59.38 \u2020 he 52.97/45.73 53.63 \u2020 /46.49 \u2020 54.72 \u2020 /47.34 \u2020 cs 60.99/51.63 61.60 \u2020 /52.41 \u2020 61.81 \u2020 /52.45 \u2020 ro 62.01/51.03 62.49/51.30 63.22 \u2020 /51.91 \u2020 sk 64.44/56.01 65.03 \u2020 /56.65 \u2020 65.36 \u2020 /56.67 \u2020 id 45.08/40.00 45.46/40.61 \u2020 46.82 \u2020 /41.63 \u2020 Average 64.09/53.93 64.51 \u2020 /54.52 \u2020 64.74 \u2020 /54.64 \u2020" |
| }, |
| "TABREF3": { |
| "num": null, |
| "html": null, |
| "type_str": "table", |
| "content": "<table><tr><td/><td colspan=\"3\">SelfAtt-Graph</td><td/><td/><td colspan=\"2\">RNN-Stack</td></tr><tr><td>AT</td><td>en-fr</td><td/><td colspan=\"2\">en-ru</td><td>en-fr</td><td/><td colspan=\"2\">en-ru</td></tr><tr><td/><td>word</td><td>sent</td><td>word</td><td>sent</td><td>word</td><td>sent</td><td>word</td><td>sent</td></tr><tr><td>GR</td><td>66.19</td><td/><td/><td/><td/><td/><td/></tr></table>", |
| "text": "66.21 66.38 66.38 64.51 64.51 64.52 64.52 GAN 66.40 66.29 66.53 66.41 64.40 64.51 64.63 64.74 WGAN 66.24 66.18 66.40 66.27 64.29 64.34 64.57 64.57" |
| }, |
| "TABREF4": { |
| "num": null, |
| "html": null, |
| "type_str": "table", |
| "content": "<table><tr><td>Lang</td><td colspan=\"2\">Auxiliary Language Perf.</td><td colspan=\"2\">Average Cross-lingual Perf.</td><td colspan=\"2\">Lang. Test Perf.</td></tr><tr><td>(Src. + Aux.)</td><td>AT</td><td>MTL</td><td>AT</td><td>MTL</td><td>AT</td><td>MTL</td></tr><tr><td>en + fr</td><td colspan=\"4\">78.49/73.30 \u2020 78.26/72.98 \u2020 66.40/56.22 66.18/56.04</td><td>62.25</td><td>59.94</td></tr><tr><td>en + pt</td><td>76.53/67.45 \u2020</td><td>75.88/66.75</td><td colspan=\"2\">66.40/56.22 66.27/56.08</td><td>60.17</td><td>72.02</td></tr><tr><td>en + es</td><td>73.66/65.48</td><td colspan=\"3\">74.04/65.83 \u2020 66.38/56.24 66.22/56.12</td><td>56.78</td><td>74.52</td></tr><tr><td>en + ru</td><td>61.67/52.41 \u2020</td><td>61.08/52.04</td><td colspan=\"2\">66.53/56.32 66.35/56.20</td><td>37.34</td><td>60.56</td></tr><tr><td>en + de</td><td>71.65/62.11 \u2020</td><td>71.17/61.88</td><td colspan=\"2\">66.41/56.13 66.18/56.12</td><td>61.22</td><td>72.08</td></tr><tr><td>en + la</td><td colspan=\"4\">49.22/35.94 \u2020 48.04/35.09 \u2020 66.45/56.20 66.17/56.05</td><td>50.04</td><td>64.91</td></tr></table>", |
| "text": "Average cross-lingual transfer performances (UAS%, excluding punctuation) on the test sets using different adversarial training objective and setting. Multilingual word embeddings are used for these experiments." |
| }, |
| "TABREF5": { |
| "num": null, |
| "html": null, |
| "type_str": "table", |
| "content": "<table><tr><td>).</td></tr></table>", |
| "text": "" |
| }, |
| "TABREF6": { |
| "num": null, |
| "html": null, |
| "type_str": "table", |
| "content": "<table><tr><td>: Average cross-lingual transfer performances</td></tr><tr><td>(UAS%/LAS%, w/o punctuation) on the test sets using</td></tr><tr><td>SelfAtt-Graph parser when different languages play the</td></tr><tr><td>role of the auxiliary language in adversarial training.</td></tr></table>", |
| "text": "" |
| } |
| } |
| } |
| } |