{
"paper_id": "N19-1017",
"header": {
"generated_with": "S2ORC 1.0.0",
"date_generated": "2023-01-19T13:56:33.118021Z"
},
"title": "Phylogenetic Multi-Lingual Dependency Parsing",
"authors": [
{
"first": "Mathieu",
"middle": [],
"last": "Dehouck",
"suffix": "",
"affiliation": {},
"email": "mathieu.dehouck@inria.fr"
},
{
"first": "Pascal",
"middle": [],
"last": "Denis",
"suffix": "",
"affiliation": {},
"email": "pascal.denis@inria.fr"
},
{
"first": "Graham",
"middle": [],
"last": "Neubig",
"suffix": "",
"affiliation": {},
"email": ""
},
{
"first": "Chris",
"middle": [],
"last": "Dyer",
"suffix": "",
"affiliation": {},
"email": ""
},
{
"first": "Yoav",
"middle": [],
"last": "Goldberg",
"suffix": "",
"affiliation": {},
"email": ""
},
{
"first": "Austin",
"middle": [],
"last": "Matthews",
"suffix": "",
"affiliation": {},
"email": ""
},
{
"first": "Waleed",
"middle": [],
"last": "Ammar",
"suffix": "",
"affiliation": {},
"email": ""
},
{
"first": "Antonios",
"middle": [],
"last": "Anastasopoulos",
"suffix": "",
"affiliation": {},
"email": ""
},
{
"first": "Miguel",
"middle": [],
"last": "Ballesteros",
"suffix": "",
"affiliation": {},
"email": ""
},
{
"first": "David",
"middle": [],
"last": "Chiang",
"suffix": "",
"affiliation": {},
"email": ""
},
{
"first": "Daniel",
"middle": [],
"last": "Clothiaux",
"suffix": "",
"affiliation": {},
"email": ""
},
{
"first": "Trevor",
"middle": [],
"last": "Cohn",
"suffix": "",
"affiliation": {},
"email": ""
},
{
"first": "Kevin",
"middle": [],
"last": "Duh",
"suffix": "",
"affiliation": {},
"email": ""
},
{
"first": "Manaal",
"middle": [],
"last": "Faruqui",
"suffix": "",
"affiliation": {},
"email": ""
},
{
"first": "Cynthia",
"middle": [],
"last": "Gan",
"suffix": "",
"affiliation": {},
"email": ""
},
{
"first": "Dan",
"middle": [],
"last": "Garrette",
"suffix": "",
"affiliation": {},
"email": ""
},
{
"first": "Yangfeng",
"middle": [],
"last": "Ji",
"suffix": "",
"affiliation": {},
"email": ""
},
{
"first": "Lingpeng",
"middle": [],
"last": "Kong",
"suffix": "",
"affiliation": {},
"email": ""
},
{
"first": "Adhiguna",
"middle": [],
"last": "Kuncoro",
"suffix": "",
"affiliation": {},
"email": ""
},
{
"first": "Gaurav",
"middle": [],
"last": "Kumar",
"suffix": "",
"affiliation": {},
"email": ""
},
{
"first": "Chaitanya",
"middle": [],
"last": "Malaviya",
"suffix": "",
"affiliation": {},
"email": ""
},
{
"first": "Paul",
"middle": [],
"last": "Michel",
"suffix": "",
"affiliation": {},
"email": ""
},
{
"first": "Yusuke",
"middle": [],
"last": "Oda",
"suffix": "",
"affiliation": {},
"email": ""
},
{
"first": "Matthew",
"middle": [],
"last": "Richardson",
"suffix": "",
"affiliation": {},
"email": ""
},
{
"first": "Naomi",
"middle": [],
"last": "Saphra",
"suffix": "",
"affiliation": {},
"email": ""
},
{
"first": "Swabha",
"middle": [],
"last": "Swayamdipta",
"suffix": "",
"affiliation": {},
"email": ""
},
{
"first": "Pengcheng",
"middle": [],
"last": "Yin",
"suffix": "",
"affiliation": {},
"email": ""
},
{
"first": "Joakim",
"middle": [],
"last": "Nivre",
"suffix": "",
"affiliation": {},
"email": ""
},
{
"first": "Mitchell",
"middle": [],
"last": "Abrams",
"suffix": "",
"affiliation": {},
"email": ""
},
{
"first": "\u017deljko",
"middle": [],
"last": "Agi\u0107",
"suffix": "",
"affiliation": {},
"email": ""
},
{
"first": "Lars",
"middle": [],
"last": "Ahrenberg",
"suffix": "",
"affiliation": {},
"email": ""
},
{
"first": "Lene",
"middle": [],
"last": "Antonsen",
"suffix": "",
"affiliation": {},
"email": ""
},
{
"first": "Maria",
"middle": [
"Jesus"
],
"last": "Aranzabe",
"suffix": "",
"affiliation": {},
"email": ""
},
{
"first": "Gashaw",
"middle": [],
"last": "Arutie",
"suffix": "",
"affiliation": {},
"email": ""
},
{
"first": "Masayuki",
"middle": [],
"last": "Asahara",
"suffix": "",
"affiliation": {},
"email": ""
},
{
"first": "Luma",
"middle": [],
"last": "Ateyah",
"suffix": "",
"affiliation": {},
"email": ""
},
{
"first": "Mohammed",
"middle": [],
"last": "Attia",
"suffix": "",
"affiliation": {},
"email": ""
},
{
"first": "Aitziber",
"middle": [],
"last": "Atutxa",
"suffix": "",
"affiliation": {},
"email": ""
},
{
"first": "Liesbeth",
"middle": [],
"last": "Augustinus",
"suffix": "",
"affiliation": {},
"email": ""
},
{
"first": "Elena",
"middle": [],
"last": "Badmaeva",
"suffix": "",
"affiliation": {},
"email": ""
},
{
"first": "Miguel",
"middle": [],
"last": "Ballesteros",
"suffix": "",
"affiliation": {},
"email": ""
},
{
"first": "Esha",
"middle": [],
"last": "Banerjee",
"suffix": "",
"affiliation": {},
"email": ""
},
{
"first": "Sebastian",
"middle": [],
"last": "Bank",
"suffix": "",
"affiliation": {},
"email": ""
},
{
"first": "Verginica",
"middle": [
"Barbu"
],
"last": "Mititelu",
"suffix": "",
"affiliation": {},
"email": ""
},
{
"first": "John",
"middle": [],
"last": "Bauer",
"suffix": "",
"affiliation": {},
"email": ""
},
{
"first": "Sandra",
"middle": [],
"last": "Bellato",
"suffix": "",
"affiliation": {},
"email": ""
},
{
"first": "Kepa",
"middle": [],
"last": "Bengoetxea",
"suffix": "",
"affiliation": {},
"email": ""
},
{
"first": "Riyaz",
"middle": [
"Ahmad"
],
"last": "Bhat",
"suffix": "",
"affiliation": {},
"email": ""
},
{
"first": "Erica",
"middle": [],
"last": "Biagetti",
"suffix": "",
"affiliation": {},
"email": ""
},
{
"first": "Adriane",
"middle": [],
"last": "Bowman",
"suffix": "",
"affiliation": {},
"email": ""
},
{
"first": "Aljoscha",
"middle": [],
"last": "Boyd",
"suffix": "",
"affiliation": {},
"email": ""
},
{
"first": "Marie",
"middle": [],
"last": "Burchardt",
"suffix": "",
"affiliation": {},
"email": ""
},
{
"first": "Bernard",
"middle": [],
"last": "Candito",
"suffix": "",
"affiliation": {},
"email": ""
},
{
"first": "Gauthier",
"middle": [],
"last": "Caron",
"suffix": "",
"affiliation": {},
"email": ""
},
{
"first": "Cebiro\u011flu",
"middle": [],
"last": "Caron",
"suffix": "",
"affiliation": {},
"email": ""
},
{
"first": "Giuseppe",
"middle": [
"G A"
],
"last": "Eryi\u011fit",
"suffix": "",
"affiliation": {},
"email": ""
},
{
"first": "Savas",
"middle": [],
"last": "Celano",
"suffix": "",
"affiliation": {},
"email": ""
},
{
"first": "Fabricio",
"middle": [],
"last": "Cetin",
"suffix": "",
"affiliation": {},
"email": ""
},
{
"first": "Jinho",
"middle": [],
"last": "Chalub",
"suffix": "",
"affiliation": {},
"email": ""
},
{
"first": "Yongseok",
"middle": [],
"last": "Choi",
"suffix": "",
"affiliation": {},
"email": ""
},
{
"first": "Jayeol",
"middle": [],
"last": "Cho",
"suffix": "",
"affiliation": {},
"email": ""
},
{
"first": "Silvie",
"middle": [],
"last": "Chun",
"suffix": "",
"affiliation": {},
"email": ""
},
{
"first": "",
"middle": [],
"last": "Cinkov\u00e1",
"suffix": "",
"affiliation": {},
"email": ""
},
{
"first": "Carly",
"middle": [],
"last": "Dickerson",
"suffix": "",
"affiliation": {},
"email": ""
},
{
"first": "Peter",
"middle": [],
"last": "Dirix",
"suffix": "",
"affiliation": {},
"email": ""
},
{
"first": "Kaja",
"middle": [],
"last": "Dobrovoljc",
"suffix": "",
"affiliation": {},
"email": ""
},
{
"first": "Timothy",
"middle": [],
"last": "Dozat",
"suffix": "",
"affiliation": {},
"email": ""
},
{
"first": "Kira",
"middle": [],
"last": "Droganova",
"suffix": "",
"affiliation": {},
"email": ""
},
{
"first": "Puneet",
"middle": [],
"last": "Dwivedi",
"suffix": "",
"affiliation": {},
"email": ""
},
{
"first": "Marhaba",
"middle": [],
"last": "Eli",
"suffix": "",
"affiliation": {},
"email": ""
},
{
"first": "Ali",
"middle": [],
"last": "Elkahky",
"suffix": "",
"affiliation": {},
"email": ""
},
{
"first": "Binyam",
"middle": [],
"last": "Ephrem",
"suffix": "",
"affiliation": {},
"email": ""
},
{
"first": "Toma\u017e",
"middle": [],
"last": "Erjavec",
"suffix": "",
"affiliation": {},
"email": ""
},
{
"first": "Aline",
"middle": [],
"last": "Etienne",
"suffix": "",
"affiliation": {},
"email": ""
},
{
"first": "Rich\u00e1rd",
"middle": [],
"last": "Farkas",
"suffix": "",
"affiliation": {},
"email": ""
},
{
"first": "Hector",
"middle": [],
"last": "Fernandez Alcalde",
"suffix": "",
"affiliation": {},
"email": ""
},
{
"first": "Jennifer",
"middle": [],
"last": "Foster",
"suffix": "",
"affiliation": {},
"email": ""
},
{
"first": "Cl\u00e1udia",
"middle": [],
"last": "Freitas",
"suffix": "",
"affiliation": {},
"email": ""
}
],
"year": "",
"venue": null,
"identifiers": {},
"abstract": "Languages evolve and diverge over time. Their evolutionary history is often depicted in the shape of a phylogenetic tree. Assuming parsing models are representations of their languages grammars, their evolution should follow a structure similar to that of the phylogenetic tree. In this paper, drawing inspiration from multi-task learning, we make use of the phylogenetic tree to guide the learning of multilingual dependency parsers leveraging languages structural similarities. Experiments on data from the Universal Dependency project show that phylogenetic training is beneficial to low resourced languages and to well furnished languages families. As a side product of phylogenetic training, our model is able to perform zero-shot parsing of previously unseen languages.",
"pdf_parse": {
"paper_id": "N19-1017",
"_pdf_hash": "",
"abstract": [
{
"text": "Languages evolve and diverge over time. Their evolutionary history is often depicted in the shape of a phylogenetic tree. Assuming parsing models are representations of their languages grammars, their evolution should follow a structure similar to that of the phylogenetic tree. In this paper, drawing inspiration from multi-task learning, we make use of the phylogenetic tree to guide the learning of multilingual dependency parsers leveraging languages structural similarities. Experiments on data from the Universal Dependency project show that phylogenetic training is beneficial to low resourced languages and to well furnished languages families. As a side product of phylogenetic training, our model is able to perform zero-shot parsing of previously unseen languages.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Abstract",
"sec_num": null
}
],
"body_text": [
{
"text": "Languages change and evolve over time. A community that spoke once a single language can be split geographically or politically, and if the separation is long enough their language will diverge in direction different enough so that at some point they might not be intelligible to each other. The most striking differences between related languages are often of lexical and phonological order but grammars also change over time.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Introduction",
"sec_num": "1"
},
{
"text": "Those divergent histories are often depicted in the shape of a tree in which related languages whose common history stopped earlier branch off higher than languages that have shared a longer common trajectory (J\u00e4ger, 2015) . We hypothesize that building on this shared history is beneficial when learning dependency parsing models. We thus propose to use the phylogenetic structure to guide the training of multi-lingual graph-based neural dependency parsers that will tie parameters between languages according to their common history.",
"cite_spans": [
{
"start": 209,
"end": 222,
"text": "(J\u00e4ger, 2015)",
"ref_id": "BIBREF11"
}
],
"ref_spans": [],
"eq_spans": [],
"section": "Introduction",
"sec_num": "1"
},
{
"text": "As our phylogenetic learning induces parsing models for every inner node in the phylogenetic tree, it can also perform zero-shot dependency parsing of unseen languages. Indeed, one can use the model of the lowest ancestor (in the tree) of a new language as an approximation of that language grammar.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Introduction",
"sec_num": "1"
},
{
"text": "We assess the potential of phylogenetic training with experiments on data from the Universal Dependencies project version 2.2. Our results show that parsers indeed benefit from this multi-lingual training regime as models trained with the phylogenetic tree outperform independently learned models. The results on zero-shot parsing show that a number of factors such as the genre of the data and the writing system have a significant impact on the quality of the analysis of an unseen language, with morphological analysis being of great help.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Introduction",
"sec_num": "1"
},
{
"text": "The remaining of this paper is organized as follows. Section 2 presents both the neural parsing model as well as the phylogenetic training procedure. Section 3 presents some experiments over data from UD 2.2. Section 4 presents some related works on multi-task learning and multilingual parsing. Finally, Section 5 closes the paper and gives some future perspectives.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Introduction",
"sec_num": "1"
},
{
"text": "We propose a multi-task learning framework that shares information between tasks using a tree structure. The tree structure allows us to both share model parameters and training samples between related tasks. We instantiate it with a graph-based neural parser and use the language phylogenetic tree to guide the learning process, but it can in principle be used with any tree that encodes tasks relateness and any learning algorithm that supports fine-tuning.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Model",
"sec_num": "2"
},
{
"text": "In this section we first describe the intuition be- hind phylogenetic training, then the neural parser and then the phylogenetic training itself.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Model",
"sec_num": "2"
},
{
"text": "Languages evolve from earlier stages and sometimes a language will change differently in different places leading to different languages with a common ancestor. This evolution process is often depicted in the shape of a tree in which leaves are actual languages and inner nodes can be either attested ancestral languages or their idealized reconstruction. Figure 1 gives an example of such a tree for a subset of the Slavic family of Indo-European languages (Simons and Fennig, 2018) .",
"cite_spans": [
{
"start": 458,
"end": 483,
"text": "(Simons and Fennig, 2018)",
"ref_id": "BIBREF17"
}
],
"ref_spans": [
{
"start": 356,
"end": 364,
"text": "Figure 1",
"ref_id": "FIGREF0"
}
],
"eq_spans": [],
"section": "Phylogenetic Hypothesis",
"sec_num": "2.1"
},
{
"text": "Just as languages evolve and diverge, so do their grammars. Assuming a parsing model is a parameterized representation of a grammar, then we can expect those models to evolve in a similar way. We thus take a multi-task approach to the problem of multi-lingual dependency parsing. What was once a single problem (e.g. parsing sentences in Proto-West-Slavic) becomes a set of distinct but related problems (parsing sentences in Czech, Polish, Slovak and Sorbian) as Proto-West-Slavic was evolving into its modern descendants.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Phylogenetic Hypothesis",
"sec_num": "2.1"
},
{
"text": "We assume that the grammar of the last common ancestor is a good approximation of those languages grammars. Thus it should be easier to learn a language's grammar starting from its ancestor grammar than from scratch. There are however some issues with this assumption. First, a language grammar can be very different from its ancestor one from two millennia earlier. Consider the difference between modern French and early Classical Latin for example, in two millennia Latin has wit-nessed the loss of its case system and a complete refoundation of its verbal system. And the \"last common\" ancestors can have very different age depending on the languages we consider. We expect the common ancestor of Tagalog and Indonesian to be much much older than the common ancestor of Portuguese and Galician. Second, a lot of languages have only started to be recorded very recently thus lacking historical data all together. And when historical records are available, much work still needs to be done to render those data usable by parsers. For example the Universal Dependencies Project (Nivre et al., 2018) only has annotated corpora for Latin, old Greek, old Church Slavonic and Sanskrit. And even for those classical languages, it is not clear to which extent their modern counterparts really descend from them. Thus we need to find another way to access the ancestor language grammar than using historical data.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Phylogenetic Hypothesis",
"sec_num": "2.1"
},
{
"text": "We propose to use all the data from descendent languages to represent an ancestor language. In principle, one could give more weight to older languages or to languages that are known to be more conservative, but this knowledge is not available for all languages families. Thus we resort to using all the available data from descendent languages without distinction.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Phylogenetic Hypothesis",
"sec_num": "2.1"
},
{
"text": "Another problem is that the tree view is too simple to represent the complete range of phenomena involved in language evolution, such as language contacts. Furthermore, languages do not evolve completely randomly, but follow some linguistic universals and have to keep a balance between speakability, learnability and understandability. Thus, languages can share grammatical features without necessarily being genetically related, either by contact or by mere chance. However, the tree model is still a good starting point in practice and language families align well with grammatical similarity as recent works on typological analysis of UD treebanks have shown (Chen and Gerdes, 2017; Schluter and Agi\u0107, 2017) . We thus make the simplifying assumption that a language grammar evolves only from an older stage and can be approximated by that previous stage.",
"cite_spans": [
{
"start": 663,
"end": 686,
"text": "(Chen and Gerdes, 2017;",
"ref_id": "BIBREF7"
},
{
"start": 687,
"end": 711,
"text": "Schluter and Agi\u0107, 2017)",
"ref_id": "BIBREF16"
}
],
"ref_spans": [],
"eq_spans": [],
"section": "Phylogenetic Hypothesis",
"sec_num": "2.1"
},
{
"text": "Our scoring model is an edge factored graph-based neural model in the vein of recent works by Dozat et al. (Dozat et al., 2017 ). There are two major differences here compared to the parser of Dozat et al. The first difference is in individual word representation, for which we use only the UPOS 1 tag, morphological information provided by UD treebanks and a character based word representation, whilst Dozat et al. use also the XPOS 2 tag, holistic word vectors (from Word2Vec (Mikolov et al., 2013) and their own) and they do not use morphological information beside what might already be given by the XPOS. The second difference is the scoring function proper. While they use biaffine scoring functions and decouple edge scoring from label scoring, we use a simple multi-layer perceptron to compute label scores and pick the max over the label as the edge score. Let x = (w 1 w 2 ...w l ) be a sentence of length l. Each word w i is represented as the concatenation of 3 subvectors, one for its part-of-speech, one for its morphological attributes and one for its form:",
"cite_spans": [
{
"start": 107,
"end": 126,
"text": "(Dozat et al., 2017",
"ref_id": "BIBREF8"
},
{
"start": 479,
"end": 501,
"text": "(Mikolov et al., 2013)",
"ref_id": "BIBREF13"
}
],
"ref_spans": [],
"eq_spans": [],
"section": "Neural Model",
"sec_num": "2.2"
},
{
"text": "\u2022 \u2022 \u2022 \u2295 \u2022 \u2022 \u2022 \u2022 \u2022 \u2022 \u2022 \u2022 \u2022 \u2022 \u2022 \u2022 \u2022 \u2022 \u2022 \u2022 \u2022 \u2022 \u2022 \u2022 \u2022 \u2022 \u2022 \u2022 \u2022 \u2022 \u2022 \u2022 \u2022 \u2022 \u2022 \u2022 \u2022 \u2022 \u2022 \u2022 \u2022 \u2022 \u2022 w o r d",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Neural Model",
"sec_num": "2.2"
},
{
"text": "w i = pos i \u2295 morph i \u2295 char i .",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Neural Model",
"sec_num": "2.2"
},
{
"text": "The part-of-speech vector (pos i ) is from a look up table. The morphological vector (morph i ) is the sum of the representation m m of each morphological attribute m of the word given by the treebanks:",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Neural Model",
"sec_num": "2.2"
},
{
"text": "morph i = m\u2208morph i m m .",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Neural Model",
"sec_num": "2.2"
},
{
"text": "We add a special dummy attribute representing the absence of morphological attributes. The form vector (char i ) is computed by a character BiLSTM (Hochreiter and Schmidhuber, 1997) . Characters are fed one by one to the recurrent neural network in each direction. The actual form vector is then the concatenation of the outputs of the forward character LSTM and of the backward character LSTM as depicted in Figure 2 . Once, each word has been given a representation in isolation, those representations are passed to two other BiLSTMs. Each word is then represented as the concatenation of its contextualised vector from the forward and backward layers:",
"cite_spans": [
{
"start": 147,
"end": 181,
"text": "(Hochreiter and Schmidhuber, 1997)",
"ref_id": "BIBREF10"
}
],
"ref_spans": [
{
"start": 409,
"end": 417,
"text": "Figure 2",
"ref_id": "FIGREF1"
}
],
"eq_spans": [],
"section": "Neural Model",
"sec_num": "2.2"
},
{
"text": "\u2022 \u2022 \u2022 \u2022 \u2022 \u2022 \u2022 \u2022 \u2022 \u2022 \u2022 \u2295 \u2022 \u2022 \u2022 \u2022 \u2022 \u2022 \u2022 \u2022 \u2022 \u2022 \u2022 \u2022 \u2022 \u2022 \u2022 \u2022 \u2022 \u2022 \u2022 \u2022 \u2022 \u2022 \u2022 \u2022 \u2022 \u2022 \u2022 \u2022 \u2022 \u2022 \u2022 \u2022 \u2022 \u2022 \u2022 \u2022 \u2022 \u2022 \u2022 <ROOT>",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Neural Model",
"sec_num": "2.2"
},
{
"text": "c i = f orward(w 1 , ..., w i )\u2295backward(w i , ..., w l ).",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Neural Model",
"sec_num": "2.2"
},
{
"text": "We actually train two different BiLSTMs, one representing words as dependents (c) and one words as governors (\u0109). An edge score is then computed as follows. Its governor word vector\u0109 i and its dependent word vector c j are concatenated and fed to a two layer perceptron (whose weights are L 1 and L 2 ) with a rectifier (noted [...] + ) after the first layer in order to compute the score s ijl of the edge for every possible relation label l:",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Neural Model",
"sec_num": "2.2"
},
{
"text": "s ij = max l s ijl = max l (L 2 \u2022 [L 1 \u2022 (\u0109 i \u2295 c j )] + ) l .",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Neural Model",
"sec_num": "2.2"
},
{
"text": "All the neural model parameters \u03b8 (part-ofspeech, character and morphological embeddings, character, dependant and governor BiLSTMs and the two layer perceptron weights) are trained end to end via back propagation one sentence at a time. Given a sentence x, we note j the index of the governor of w i and l the relation label of w i , the loss function is:",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Neural Model",
"sec_num": "2.2"
},
{
"text": "loss(x) = w i j =j j =i max(0, s ij \u2212 s ij + 1) 2 + l =l max(0, s ijl \u2212 s ijl + 1) 2",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Neural Model",
"sec_num": "2.2"
},
{
"text": "For each word, there are two terms. The first term enforces that for all potential governors that are neither the word itself nor its actual governor, their highest score (irrespective of the relation label) should be smaller than the score of the actual governor and actual label by a margin of 1. The second term is similar and enforces that for the actual governor, any label that is not the true label should have a score smaller than the score of the actual label again by a margin of 1.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Neural Model",
"sec_num": "2.2"
},
{
"text": "Let L = {l 1 , l 2 , ..., l n l } be a set of n l languages and let P = {p 1 , p 2 , ..., p np } be a set of n p protolanguages (hypothesized ancestors of languages in L). Let T be a tree over L * = L \u222a P such that languages of L are leaves and proto-languages of P are inner nodes. This means that we assume no two languages in L share a direct parenthood relation, but they at best descend both from a hypothesized parent. We could in principle have data appearing only in inner nodes. Tree T has a single root, a proto-language from P that all grammars descend from. This ancestor of all languages shall model linguistic universals 3 and ensure we deal with a well formed tree. We use the notation p > l for the fact that language/node l descends from language/node p.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Phylogenetic Training",
"sec_num": "2.3"
},
{
"text": "For each language l \u2208 L, we assume access to a set of n annotated examples D l . For each protolanguage p \u2208 P, we create an annotated set D p = p>l D l as the union of its descendent sets. For each language l \u2208 L * , we want to learn a parsing model \u03b8 l .",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Phylogenetic Training",
"sec_num": "2.3"
},
{
"text": "The main idea behind phylogenetic training is to initialize a new model with the model of its parent, thus effectively sharing information between languages and letting models diverged and specialize over time. The training pocedure is summarized in Algorithm 1.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Model Evolution",
"sec_num": "2.3.1"
},
{
"text": "At the beginning, we initialize a new blank/random model that will be the basic parsing model for all the world languages. Then, we sample sentences (we will discuss sampling issues in next section) randomly from all the available languages, parse them, compute the loss and update the model accordingly. Since the training sentences are sampled from all the available languages, the model Data: a train set D l and a dev set D l per language, a tree T , two sampling sizes k, k and a maximum number of reboot r Result: a model \u03b8 per node in T begin Instantiate empty queue Q Q.push(T .root) while Q is not empty do l = Q.pop() if l = T .root then initialize \u03b8 0 T .root randomly else",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Model Evolution",
"sec_num": "2.3.1"
},
{
"text": "\u03b8 0 l = \u03b8 l.parent reboot = 0, i = 1, a 0 = 0 while reboot < r do \u03b8 i l = train(\u03b8 i\u22121 l , D l , k) a i = test(\u03b8 i l , D l , k ) if a i \u2264 a i\u22121 then reboot += 1 else reboot = 0, i += 1 \u03b8 l = \u03b8 i l for c in l.children do Q.push(c)",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Model Evolution",
"sec_num": "2.3.1"
},
{
"text": "Algorithm 1: Phylogenetic training procedure.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Model Evolution",
"sec_num": "2.3.1"
},
{
"text": "will learn to be as good as possible for all the languages at the same time. When the model \u03b8 p has reached an optimum (that we defined hereafter), we pass a copy of it to each of its children. Thus, for each child c of p, we initialize \u03b8 0 c = \u03b8 p to its parent (p) final state. Each model \u03b8 c is then refined on its own data set D c which is a subset of D p , until it reaches its own optimum state and is passed down to its own children. This process is repeated until the model reaches a leaf language, where the model \u03b8 c is eventually refined over its mono-lingual data set D c .",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Model Evolution",
"sec_num": "2.3.1"
},
{
"text": "By passing down optimal models from older/larger languages sets to newer/smaller ones, models get the chance to learn relevant information from many different languages while specializing as time goes by.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Model Evolution",
"sec_num": "2.3.1"
},
{
"text": "The question now is to find when to pass down a model to its children. In other words, at which stage has a model learned the most it could from its data and should start to diverge to improve again?",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Model Evolution",
"sec_num": "2.3.1"
},
{
"text": "Following the principle of cross-validation, we propose to let held-out data decide when is the right time to pass the model down. Let D p be a set of held-out sentences from the same languages as D p . Then, after every epoch i of k training examples, we freeze the model \u03b8 i p , and test it on k sentences from D p . This gives a score a i (UAS/LAS) to the current model. If the score is higher than the score of the previous model \u03b8 i\u22121 p then training goes on, otherwise we discard it and retrain \u03b8 i\u22121 p for another k sentences. If after having discarded r epochs in a raw we have not yet found a better one, then we assume we have reached an optimal model \u03b8 i\u22121 p and pass it on to its children (unless it is a leaf, in which training is over for that language).",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Model Evolution",
"sec_num": "2.3.1"
},
{
"text": "There are a few things we should consider when drawing examples from a proto-language distribution. Beside the question of whether some languages are more conservative than others with respect to their ancestor, which we have decided to simplify saying that all languages are as representative of their ancestors, there is the problem of data unbalance and tree unbalance.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Sentence Sampling",
"sec_num": "2.3.2"
},
{
"text": "Sampling sentences uniformly across languages is not a viable option for the size of datasets varies a lot across languages and that they do not correlate with how close a language is to its ancestor. For example, there are 260 Belarusian training sentences against 48814 Russian ones. The basic question is thus whether one should draw examples from languages or branches. Basic linguistic intuition tells us that drawing should be performed on branches. Modern languages distribution has no reason to reflect their proximity to their ancestor language. Amongst Indo-European languages, there are one or two Armenian languages as well as one or two Albanian languages (depending on the criteria for being a language), while there are tens of Slavic languages and Romance languages. However, there is no reason to believe that Slavic or Romance languages are better witnesses of proto-Indo-European than Armenian or Albanian.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Sentence Sampling",
"sec_num": "2.3.2"
},
{
"text": "Drawing examples from languages would bias the intermediate models toward families that have more languages (or more treebanks). It might be a good bias depending on the way one compute the overall accuracy of the system. If one uses the macro-average of the individual language parsers, then biasing models toward families with many members should improve the accuracy overall.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Sentence Sampling",
"sec_num": "2.3.2"
},
{
"text": "In this work, at a given inner node, we decided to sample uniformly at random over branches spanning from this node, then uniformly at random over languages and then uniformly at random over sentences. It boils down to flattening the subtree below an inner node to have a maximum depth of 2. For example in Figure 1 , at the root (Proto-Slavic) we pick a branch at random (e.g. Proto-South-Slavic), then a language at random (e.g. Croatian) then a sentence at random. Given that we have picked the Proto-South-Slavic branch, all South-Slavic languages are then as likely to be chosen. This biases slightly the model toward bigger subfamilies. In our example, Croatian and Serbian have the same chances to be sampled than Slovenian, therefore their family, Proto-Serbocroatian is twice as likely to be chosen as Slovenian is, while being at the same depth in the tree. We could otherwise sample over branches, then over sub-branches again and again until we reach a leaf and only then pick a sentence. In this case, Proto-Serbocroatian and Slovenian would have the same probability to be chosen. This would give much more weight to languages high in the tree than languages low in the tree. While this would give more balance to the actual model, it could be detrimental to the averaged results since the data distribution is itself unbalanced. It would of course be possible to try any variation between those two, picking sub-branches according to a probability that would depend on the number of languages in that family for example, therefore mitigating the unbalance problem.",
"cite_spans": [],
"ref_spans": [
{
"start": 307,
"end": 315,
"text": "Figure 1",
"ref_id": "FIGREF0"
}
],
"eq_spans": [],
"section": "Sentence Sampling",
"sec_num": "2.3.2"
},
{
"text": "An interesting property of the phylogenetic training procedure is that it provides a model for each inner node of the tree and thus each intermediary grammar. If one were to bring a new language with its position in the tree, then we can use the pretrained model of its direct ancestor as an initialization instead of learning a new model from scratch. Similarly, one can use this ancestor model directly to parse the new language, effectively performing zero-shot dependency parsing. We investigate this possibility in the experiment section.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Zero-Shot Parsing",
"sec_num": "2.4"
},
{
"text": "To assess the potential of phylogenetic training both in terms of multi-task learning and zero-shot parsing capabilities, we experimented with data from the Universal Dependencies project version 2.2 (Nivre et al., 2018). When several corpora are available for a language, we chose one to keep a good balance between morphological annotation and number of sentences. For example, the Portuguese GSD treebank has slightly more sentences than the Bosque treebank but it is not well morphologically annotated. The zero-shot parsing models have been directly tested on languages that lack of training set. The treebanks names are given in the tree 4 and the result table 1.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Experiments",
"sec_num": "3"
},
{
"text": "As some languages have no training data and unique writing systems making the character model inefficient for them, we resorted to use gold parts-of-speech and morphological attributes rather than predicted ones. For example, Thai has no training data, no language relative and a unique script, which altogether make it really hard to parse (from a phylogenetic perspective).",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Setting",
"sec_num": "3.1"
},
{
"text": "The phylogenetic tree used for the experiment is adapted from the Ethnologue (2018). For space reasons, it is reported in the appendix in Figures 4 and 5. We tried to have a tree as consensual as possible, but there are still a few disputable choices, mostly about granularity and consistency. Sanskrit could have its own branch in the Indic family just as Latin in the Romance family, but because Sanskrit has no training data, that would not actually change the results. Likewise, as Czechoslovak and Dutch-Afrikaans have their own branches, Scandinavian languages could also distributed between east and west Scandinavian. As an English based Creole, Naija could as well be put in the Germanic family, but we kept it as a separate (Creole) family.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Setting",
"sec_num": "3.1"
},
{
"text": "Regarding model training proper, we used k = 500 training sentences per iteration, k = 500 held-out sentences from the developpement set to compute running LAS and a maximum number of reboot r = 5. Following Dozat et al (2017), we use Eisner algorithm (Eisner, 1996) at test time to ensure outputs are well formed trees. The neural model is implemented in Dynet (Neubig et al., 2017) and we use Adadelta with default parameters as our trainer. We averaged the results over 5 random initializations. Independent models are trained in the same manner but with mono-lingual data only. We report both labeled and unlabeled edge prediction accuracy (UAS/LAS). In the appendix we also report results averaged per family. Table 1 reports parsing results for languages that have a training set. Note that a few languages do not have a separate developpement set, then we used the training set for both training and validation. The training set size of those languages is reported in square brackets. This has low to no impact on other languages results but it can be problematic for the language itself as it can over-fit its training data especially when they are very few as is the case of Buryat for example. To be fair, we report two different averages. Avg is the average over languages that have a separate developpement set, and Avg No Dev is the average over languages that do not have a separate developpement set. For each language, the best UAS/LAS are reported in bold.",
"cite_spans": [
{
"start": 252,
"end": 266,
"text": "(Eisner, 1996)",
"ref_id": "BIBREF9"
}
],
"ref_spans": [
{
"start": 715,
"end": 722,
"text": "Table 1",
"ref_id": "TABREF2"
}
],
"eq_spans": [],
"section": "Setting",
"sec_num": "3.1"
},
{
"text": "On average, phylogenetic training improves parsing accuracy, both labeled and unlabeled. This is especially true for languages that have very small training sets (50 sentences or less) and lack of developpement set. Those languages show an averaged 7 points improvement and up to 15 points (hsb, kmr). Since independent mono-lingual models follow the exact same training procedure but without phylogenetic initialization and that every sentence will be seen several times both at training and validation, the sampling method cannot explain such a difference. This shows that the ancestor's model is a good initialization and acts as a form of regularization, slowing down over-fitting.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Multi-Task Learning",
"sec_num": "3.2"
},
{
"text": "Phylogenetic training is also beneficial as one gains information from related languages. Indo-European languages gain from sharing information. This is especially true for Balto-Slavic (sk +5.82, lt +5.07 UAS) and Indo-Iranian languages (mr +2.05 UAS). It is less consistent for Romance and Germanic languages. This might be due to the tree not representing well typology for those families. Typically, English tends to group syntactically with Scandinavian languages more than with West-Germanic. Turkic and Uralic languages show the same benefits overall (ug +2.67, fi +3.39 UAS).",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Multi-Task Learning",
"sec_num": "3.2"
},
{
"text": "Dravidian and Afro-Asiatic languages are not as consistent. While Telugu seems to gain from Tamil data, the reverse is not true. Result variation for Arabic, Hebrew and Coptic are marginal. This is likely due to the fact that we only have three quite different languages from that family and that they all have their own script.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Multi-Task Learning",
"sec_num": "3.2"
},
{
"text": "Similarly, phylogenetic training is not consistently useful for languages that do not have rela- tives. While Buryat (bxr) that has a very small training set benefits from universal linguistic information and gain almost 11 points UAS, Basque (eu) that has a very different grammatical structure than other languages and enough training data (5396 sentences) looses 3.25 LAS. Gains and losses are marginal for the other five languages (id, ja, ko, vi, zh) .",
"cite_spans": [
{
"start": 435,
"end": 455,
"text": "(id, ja, ko, vi, zh)",
"ref_id": null
}
],
"ref_spans": [],
"eq_spans": [],
"section": "Multi-Task Learning",
"sec_num": "3.2"
},
{
"text": "Overall results are a bit below the state of the art, but the model is very simple and relies on gold morphology, so it is not really comparable. Table 2 reports parsing results for languages that do not have a training set. Because of phylogenetic training and the tree structure that guides it, it can happen that a language ancestor's model is in fact trained on data only accounting for a narrow range of later stages. For example, while Faroese uses the North-Germanic model refined on both Norwegians, Swedish and Danish data, Tagalog uses the Austronesian model only refined with Indonesian data thus making it more an Indonesian model than an actual Austronesian model. Those cases are marked by an asterisk in the table. Komi (kpv) model is refined on Finno-Samic data, Breton (br) model on Irish data, Cantonese (yue) model on Mandarin data.",
"cite_spans": [],
"ref_spans": [
{
"start": 146,
"end": 153,
"text": "Table 2",
"ref_id": "TABREF3"
}
],
"eq_spans": [],
"section": "Multi-Task Learning",
"sec_num": "3.2"
},
{
"text": "Looking at Table 2 , we make the following observations. As expected scores are on average lower than for languages with training data, however the UAS/LAS gap is substantially bigger from 6.781 to 17.08 points. It is hard to compare to other works on zero-shot parsing since they use different data and scores span a big range, but our results are comparable to those of Aufrant et al. (2016) and Naseem et al. (2012) , while our zero-shot models are given for free by the phylogenetic training method.",
"cite_spans": [
{
"start": 372,
"end": 393,
"text": "Aufrant et al. (2016)",
"ref_id": "BIBREF1"
},
{
"start": 398,
"end": 418,
"text": "Naseem et al. (2012)",
"ref_id": "BIBREF14"
}
],
"ref_spans": [
{
"start": 11,
"end": 18,
"text": "Table 2",
"ref_id": "TABREF3"
}
],
"eq_spans": [],
"section": "Zero-Shot Parsing",
"sec_num": "3.3"
},
{
"text": "On a language per language basis, we see that there are a few important factors, the most striking being genre. Tagalog (tl) and more surprisingly Warlpiri (wbp) have relatively high parsing accuracy despite being either completely isolated or having only one relative (Indonesian). This is likely because their data are well annotated stereotypical sentences extracted from grammars, thus making them easy to parse.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Zero-Shot Parsing",
"sec_num": "3.3"
},
{
"text": "Then we see that Naija (pcm) and Yoruba (yo) are about 25 points higher than Thai (th) despite them three having low morphology (in the treebanks). As they have different genres (spoken, bible, news and wiki), without a deeper look at the trees themselves, our best guess is that this is due to Thai having a different script. Naija and Yoruba both use the Latin alphabet, and as such they can rely to some extent on the character model to share information with other languages, to at least organise the character space. This analysis would also carry for Cantonese (yue). It is a morphologically simple language, and despite having a relative (Mandarin), its score is rather low. The genre alone (spoken) would not explain everything as Naija has also a spoken treebank and a higher score. The writing system might be at blame once again. Indeed, Chinese characters are very different from alphabetic characters and are much harder to use in character models because of sparsity. Comparing Mandarin and Cantonese test sets with Mandarin train set, the amount of out-of-vocabulary words is 32.47% of types (11.90% of tokens) for Mandarin and 54.88% of types (56.50% of tokens) for Cantonese. The results for out-of-vocabulary characters are even more striking with 3.73% of types (0.49% of tokens) for Mandarin and 12.97% of types (34.29% of tokens) for Cantonese. This shows that not only there are a lot of OOV in Cantonese test set, but that those words/characters are common ones as 12.97% of character types missing make up for more than a third of all character tokens missing, where on the contrary Mandarin OOV are seldom and account for less tokens percentage than types. This is one more argument supporting the importance of the character vector.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Zero-Shot Parsing",
"sec_num": "3.3"
},
{
"text": "Other important factors are typology and morphology. Amharic (am) despite its unique script has a higher score than Cantonese that actually shares its scripts (to some extent as we have seen) with Mandarin. The key point for Amharic score, is that all its relatives (Hebrew, Arabic and Cop-tic) have their own scripts and are morphologically rich, thus the model learns to use morphological information. The analysis is similar for Komi which on top of sharing morphology with its relatives also share the writing system which provides it an extra gain. However, this might word in the opposite direction as well, as we can see with Faroese, Breton and Sanskrit. Faroese (fo) is morphologically rich and that should help, however its North-Germanic relatives are morphologically much simpler. Thus the model does not learn to rely on morphological attributes nor on word endings for the character model as much. The same is true for Sanskrit (sa), which is morphologically richer than its modern Indic relatives, with an extra layer of specific writing systems. Eventually, Breton model (br) is refined over Irish data only and while Irish is a typological outlier amongst Indo-European languages because of its Verb-Subject-Object word order, Breton has the standard Subject-Verb-Object, thus using Irish data might actually be detrimental.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Zero-Shot Parsing",
"sec_num": "3.3"
},
{
"text": "These arguments show the respective importance of the writing system, the genre of the data, the morphological analysis and the typology in phylogenetic zero-shot dependency parsing. Those factors can either work together positively (Komi) or negatively (Cantonese) or cancel each other out (Amharic, Faroese).",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Zero-Shot Parsing",
"sec_num": "3.3"
},
{
"text": "The goal of multi-task learning is to learn related tasks (either sharing their input and/or output space of participating of the same pipeline) jointly in order to improve their models over independently learned one (Caruana, 1997) . In S\u00f8gaard et al. (2016) , task hierarchy is directly encoded in the neural model allowing tasks with different output space to share parts of their parameters (POS tagging comes at a lower level than CCG parsing and only back-propagates to lower layers). Likewise, in Johnson et al. (2017) , the encoder/decoder architecture allows to learn encoders that target several output languages and decoders than handle data from various input languages. However, in multitask learning literature, task relationships are often fixed. In Cavallanti et al. (2010) tasks with the same output spaces share parameter updates through a fixed similarity graph. In this work, changing level in the tree can be seen as splitting the similarity graph into disjoint sub graphs. It is a way to have tasks relationships evolving during training and to encode information about task evolution that lacks in other multi-task methods.",
"cite_spans": [
{
"start": 217,
"end": 232,
"text": "(Caruana, 1997)",
"ref_id": "BIBREF3"
},
{
"start": 238,
"end": 259,
"text": "S\u00f8gaard et al. (2016)",
"ref_id": "BIBREF18"
},
{
"start": 504,
"end": 525,
"text": "Johnson et al. (2017)",
"ref_id": "BIBREF12"
},
{
"start": 765,
"end": 789,
"text": "Cavallanti et al. (2010)",
"ref_id": "BIBREF4"
}
],
"ref_spans": [],
"eq_spans": [],
"section": "Related Work",
"sec_num": "4"
},
{
"text": "In multi-lingual parsing, Ammar et al. (2016) propose to train a single model to parse many languages using both typological information, crosslingual word representations and language specific information. While their model gives good results, they only apply it to 7 Germanic and Romance languages. It would be worth doing the experiment with 50+ languages and see how the results would change. However, because of language specific information their model would probably become very big. In this work, language specific information is not added on the top of the model, but is just language generic information that refines over time. Che et al. (2017; and Stymne et al. (2018) propose to train parsers on several concatenated treebanks either from the same language or from related languages and to fine-tune the parsers on individual treebanks afterward to fit specific languages/domains. The main difference with our method, is that instead of one step of fine-tuning, we perform as many fine-tuning as there are ancestors in the tree, each time targeting more and more specific data. This in turn requires that we handle data imbalance therefore using sampling rather than plain concatenation. Aufrant et al. (2016) propose to tackle zeroshot parsing by rewriting source treebanks to better fit target language typology. Assuming that typology is homogeneous in a language family, the phylogeny should drive models to be typologically aware. However, as we have seen for Breton and Irish, that assumption might not always hold.",
"cite_spans": [
{
"start": 26,
"end": 45,
"text": "Ammar et al. (2016)",
"ref_id": "BIBREF0"
},
{
"start": 638,
"end": 655,
"text": "Che et al. (2017;",
"ref_id": "BIBREF5"
},
{
"start": 660,
"end": 680,
"text": "Stymne et al. (2018)",
"ref_id": "BIBREF19"
},
{
"start": 1201,
"end": 1222,
"text": "Aufrant et al. (2016)",
"ref_id": "BIBREF1"
}
],
"ref_spans": [],
"eq_spans": [],
"section": "Related Work",
"sec_num": "4"
},
{
"text": "Eventually, the closest work from our in spirit is the one of Berg-Kirkpatrick et al. (2010) . They use a phylogenetic tree to guide the training of unsupervised dependency parsing models of several languages, using ancestor models to tie descendent ones. The main difference here beside supervision, is that we do not use ancestor models as biases but rather as initialization of descendent models.",
"cite_spans": [
{
"start": 62,
"end": 92,
"text": "Berg-Kirkpatrick et al. (2010)",
"ref_id": "BIBREF2"
}
],
"ref_spans": [],
"eq_spans": [],
"section": "Related Work",
"sec_num": "4"
},
{
"text": "We have presented a multi-task learning framework that allows one to train models for several tasks that have diverged over time. Leveraging their common evolutionary history through a phylogenetic tree, models share parameters and train-ing samples until they need to diverge. As a by product of this phylogenetic training, we are provided with intermediary models that can be used to zero-shot a new related task, given its position in the evolutionary history.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Conclusion",
"sec_num": "5"
},
{
"text": "We have applied this framework to dependency parsing using a graph-based neural parser and the phylogenetic tree of the languages from UD 2.2 to guide the training process. Our results show that phylogenetic training is beneficial for well populated families such as Indo-European and Uralic. It also helps generalization and prevents over-fitting when very few data are available. For zero-shot parsing, genre, writing system and morphology are crucial factors for the quality of parse predictions.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Conclusion",
"sec_num": "5"
},
{
"text": "Some works have been done on automatically learning task relationship in multi-task setting. It would be interesting to see how the algorithm could figure out when and how to cluster languages automatically as phylogenetic trees do not directly depict grammar evolution.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Conclusion",
"sec_num": "5"
},
{
"text": "Our model does not know that Latin came before Old French and before modern French, or that despite being Germanic, English underwent a heavy Romance influence. It would be worth investigating softening the tree constraints and instigating more evolutionary information in the structure.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Conclusion",
"sec_num": "5"
},
{
"text": "Another important point is that we use gold partof-speech and morphological information which is unlikely to be available in real scenarios. However, our new training procedure can be applied to any task, so a future work would be to use it to perform phylogenetic POS tagging.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Conclusion",
"sec_num": "5"
},
{
"text": "Other directions for the future are designing better sampling methods as well as better ways to measure training convergence at each level.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Conclusion",
"sec_num": "5"
},
{
"text": "Universal part-of-speech for a set of 17 tags. Does not encode morphology.2 Language specific part-of-speech. Might include morphological information, but is not available for all languages.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "",
"sec_num": null
},
{
"text": "It does not imply anything about our belief or not in the monoglotto genesis hypothesis.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "",
"sec_num": null
}
],
"back_matter": [
{
"text": "This work was supported by ANR Grant GRASP No. ANR-16-CE33-0011-01 and Grant from CPER Nord-Pas de Calais/FEDER DATA Advanced data science and technologies 2015-2020. We also thank the reviewers for their valuable feedback.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Acknowledgement",
"sec_num": null
},
{
"text": " Figure 4 : Phylogenetic tree used to guide the training process of the multi-lingual parser. Underlined languages are those that do not have a training set. The code of the language and if necessary the name of the treebank are given in parentheses. The Indo-European sub-tree is depicted on the right.Figures 4 and 5 represent the phylogenetic tree used for guiding the training process. As we only use data from the UD project 2.2, we collapse unique child so that Vietnamese is not an Austro-Asiatic language, it is just Vietnamese. We also only use well attested families, thus Buryat, a Mongolic language, is alone and not linked to Turkic languages. Maybe, the most disputable choice is to put Naija in its own Creole family instead of the Germanic family. Figure 5 : The Indo-European phylogenetic tree.",
"cite_spans": [],
"ref_spans": [
{
"start": 1,
"end": 9,
"text": "Figure 4",
"ref_id": null
},
{
"start": 764,
"end": 772,
"text": "Figure 5",
"ref_id": null
}
],
"eq_spans": [],
"section": "A Appendix",
"sec_num": null
}
],
"bib_entries": {
"BIBREF0": {
"ref_id": "b0",
"title": "Many languages, one parser",
"authors": [
{
"first": "Waleed",
"middle": [],
"last": "Ammar",
"suffix": ""
},
{
"first": "George",
"middle": [],
"last": "Mulcaire",
"suffix": ""
},
{
"first": "Miguel",
"middle": [],
"last": "Ballesteros",
"suffix": ""
},
{
"first": "Chris",
"middle": [],
"last": "Dyer",
"suffix": ""
},
{
"first": "Noah",
"middle": [],
"last": "Smith",
"suffix": ""
}
],
"year": 2016,
"venue": "Transactions of the Association for Computational Linguistics",
"volume": "4",
"issue": "",
"pages": "431--444",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Waleed Ammar, George Mulcaire, Miguel Ballesteros, Chris Dyer, and Noah Smith. 2016. Many languages, one parser. Transactions of the Association for Com- putational Linguistics, 4:431-444.",
"links": null
},
"BIBREF1": {
"ref_id": "b1",
"title": "Zero-resource Dependency Parsing: Boosting Delexicalized Cross-lingual Transfer with Linguistic Knowledge",
"authors": [
{
"first": "Lauriane",
"middle": [],
"last": "Aufrant",
"suffix": ""
},
{
"first": "Guillaume",
"middle": [],
"last": "Wisniewski",
"suffix": ""
},
{
"first": "Fran\u00e7ois",
"middle": [],
"last": "Yvon",
"suffix": ""
}
],
"year": 2016,
"venue": "COLING 2016, the 26th International Conference on Computational Linguistics",
"volume": "",
"issue": "",
"pages": "119--130",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Lauriane Aufrant, Guillaume Wisniewski, and Fran\u00e7ois Yvon. 2016. Zero-resource Dependency Parsing: Boosting Delexicalized Cross-lingual Transfer with Linguistic Knowledge. In COLING 2016, the 26th International Conference on Computational Lin- guistics, pages 119-130, Osaka, Japan. The COL- ING 2016 Organizing Committee.",
"links": null
},
"BIBREF2": {
"ref_id": "b2",
"title": "Phylogenetic grammar induction",
"authors": [
{
"first": "Taylor",
"middle": [],
"last": "Berg",
"suffix": ""
},
{
"first": "-",
"middle": [],
"last": "Kirkpatrick",
"suffix": ""
},
{
"first": "Dan",
"middle": [],
"last": "Klein",
"suffix": ""
}
],
"year": 2010,
"venue": "ACL 2010, Proceedings of the 48th Annual Meeting of the Association for Computational Linguistics",
"volume": "",
"issue": "",
"pages": "1288--1297",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Taylor Berg-Kirkpatrick and Dan Klein. 2010. Phy- logenetic grammar induction. In ACL 2010, Pro- ceedings of the 48th Annual Meeting of the Asso- ciation for Computational Linguistics, July 11-16, 2010, Uppsala, Sweden, pages 1288-1297.",
"links": null
},
"BIBREF3": {
"ref_id": "b3",
"title": "Multitask learning",
"authors": [
{
"first": "Rich",
"middle": [],
"last": "Caruana",
"suffix": ""
}
],
"year": 1997,
"venue": "Machine Learning",
"volume": "28",
"issue": "",
"pages": "41--75",
"other_ids": {
"DOI": [
"10.1023/A:1007379606734"
]
},
"num": null,
"urls": [],
"raw_text": "Rich Caruana. 1997. Multitask learning. Machine Learning, 28(1):41-75.",
"links": null
},
"BIBREF4": {
"ref_id": "b4",
"title": "Linear algorithms for online multitask classification",
"authors": [
{
"first": "Giovanni",
"middle": [],
"last": "Cavallanti",
"suffix": ""
},
{
"first": "Nicol\u00f2",
"middle": [],
"last": "Cesa-Bianchi",
"suffix": ""
},
{
"first": "Claudio",
"middle": [],
"last": "Gentile",
"suffix": ""
}
],
"year": 2010,
"venue": "J. Mach. Learn. Res",
"volume": "11",
"issue": "",
"pages": "2901--2934",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Giovanni Cavallanti, Nicol\u00f2 Cesa-Bianchi, and Claudio Gentile. 2010. Linear algorithms for online multi- task classification. J. Mach. Learn. Res., 11:2901- 2934.",
"links": null
},
"BIBREF5": {
"ref_id": "b5",
"title": "The hit-scir system for end-to-end parsing of universal dependencies",
"authors": [
{
"first": "Wanxiang",
"middle": [],
"last": "Che",
"suffix": ""
},
{
"first": "Jiang",
"middle": [],
"last": "Guo",
"suffix": ""
},
{
"first": "Yuxuan",
"middle": [],
"last": "Wang",
"suffix": ""
},
{
"first": "Bo",
"middle": [],
"last": "Zheng",
"suffix": ""
},
{
"first": "Huaipeng",
"middle": [],
"last": "Zhao",
"suffix": ""
},
{
"first": "Yang",
"middle": [],
"last": "Liu",
"suffix": ""
},
{
"first": "Dechuan",
"middle": [],
"last": "Teng",
"suffix": ""
},
{
"first": "Ting",
"middle": [],
"last": "Liu",
"suffix": ""
}
],
"year": 2017,
"venue": "Proceedings of the CoNLL 2017 Shared Task: Multilingual Parsing from Raw Text to Universal Dependencies",
"volume": "",
"issue": "",
"pages": "52--62",
"other_ids": {
"DOI": [
"10.18653/v1/K17-3005"
]
},
"num": null,
"urls": [],
"raw_text": "Wanxiang Che, Jiang Guo, Yuxuan Wang, Bo Zheng, Huaipeng Zhao, Yang Liu, Dechuan Teng, and Ting Liu. 2017. The hit-scir system for end-to-end pars- ing of universal dependencies. In Proceedings of the CoNLL 2017 Shared Task: Multilingual Pars- ing from Raw Text to Universal Dependencies, pages 52-62, Vancouver, Canada. Association for Compu- tational Linguistics.",
"links": null
},
"BIBREF6": {
"ref_id": "b6",
"title": "Towards better ud parsing: Deep contextualized word embeddings, ensemble, and treebank concatenation",
"authors": [
{
"first": "Wanxiang",
"middle": [],
"last": "Che",
"suffix": ""
},
{
"first": "Yijia",
"middle": [],
"last": "Liu",
"suffix": ""
},
{
"first": "Yuxuan",
"middle": [],
"last": "Wang",
"suffix": ""
},
{
"first": "Bo",
"middle": [],
"last": "Zheng",
"suffix": ""
},
{
"first": "Ting",
"middle": [],
"last": "Liu",
"suffix": ""
}
],
"year": 2018,
"venue": "Proceedings of the CoNLL 2018 Shared Task: Multilingual Parsing from Raw Text to Universal Dependencies",
"volume": "",
"issue": "",
"pages": "55--64",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Wanxiang Che, Yijia Liu, Yuxuan Wang, Bo Zheng, and Ting Liu. 2018. Towards better ud parsing: Deep contextualized word embeddings, ensemble, and treebank concatenation. In Proceedings of the CoNLL 2018 Shared Task: Multilingual Parsing from Raw Text to Universal Dependencies, pages 55- 64. Association for Computational Linguistics.",
"links": null
},
"BIBREF7": {
"ref_id": "b7",
"title": "Classifying languages by dependency structure. typologies of delexicalized universal dependency treebanks",
"authors": [
{
"first": "Xinying",
"middle": [],
"last": "Chen",
"suffix": ""
},
{
"first": "Kim",
"middle": [],
"last": "Gerdes",
"suffix": ""
}
],
"year": 2017,
"venue": "Proceedings of the Fourth International Conference on Dependency Linguistics",
"volume": "139",
"issue": "",
"pages": "54--63",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Xinying Chen and Kim Gerdes. 2017. Classifying languages by dependency structure. typologies of delexicalized universal dependency treebanks. In Proceedings of the Fourth International Conference on Dependency Linguistics (Depling 2017), Septem- ber 18-20, 2017, Universit\u00e0 di Pisa, Italy, 139, pages 54-63. Link\u00f6ping University Electronic Press, Link\u00f6pings universitet.",
"links": null
},
"BIBREF8": {
"ref_id": "b8",
"title": "Stanford's graph-based neural dependency parser at the conll 2017 shared task",
"authors": [
{
"first": "Timothy",
"middle": [],
"last": "Dozat",
"suffix": ""
},
{
"first": "Peng",
"middle": [],
"last": "Qi",
"suffix": ""
},
{
"first": "Christopher",
"middle": [
"D"
],
"last": "Manning",
"suffix": ""
}
],
"year": 2017,
"venue": "Proceedings of the CoNLL 2017 Shared Task: Multilingual Parsing from Raw Text to Universal Dependencies",
"volume": "",
"issue": "",
"pages": "20--30",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Timothy Dozat, Peng Qi, and Christopher D. Manning. 2017. Stanford's graph-based neural dependency parser at the conll 2017 shared task. In Proceedings of the CoNLL 2017 Shared Task: Multilingual Pars- ing from Raw Text to Universal Dependencies, pages 20-30, Vancouver, Canada. Association for Compu- tational Linguistics.",
"links": null
},
"BIBREF9": {
"ref_id": "b9",
"title": "Three new probabilistic models for dependency parsing: An exploration",
"authors": [
{
"first": "Jason",
"middle": [
"M"
],
"last": "Eisner",
"suffix": ""
}
],
"year": 1996,
"venue": "Proceedings of the 16th Conference on Computational Linguistics",
"volume": "1",
"issue": "",
"pages": "340--345",
"other_ids": {
"DOI": [
"10.3115/992628.992688"
]
},
"num": null,
"urls": [],
"raw_text": "Jason M. Eisner. 1996. Three new probabilistic mod- els for dependency parsing: An exploration. In Pro- ceedings of the 16th Conference on Computational Linguistics -Volume 1, COLING '96, pages 340- 345, Stroudsburg, PA, USA. Association for Com- putational Linguistics.",
"links": null
},
"BIBREF10": {
"ref_id": "b10",
"title": "Long short-term memory",
"authors": [
{
"first": "Sepp",
"middle": [],
"last": "Hochreiter",
"suffix": ""
},
{
"first": "J\u00fcrgen",
"middle": [],
"last": "Schmidhuber",
"suffix": ""
}
],
"year": 1997,
"venue": "Neural Comput",
"volume": "9",
"issue": "8",
"pages": "1735--1780",
"other_ids": {
"DOI": [
"10.1162/neco.1997.9.8.1735"
]
},
"num": null,
"urls": [],
"raw_text": "Sepp Hochreiter and J\u00fcrgen Schmidhuber. 1997. Long short-term memory. Neural Comput., 9(8):1735- 1780.",
"links": null
},
"BIBREF11": {
"ref_id": "b11",
"title": "Support for linguistic macrofamilies from weighted sequence alignment. Proceedings of the National Academy of",
"authors": [
{
"first": "Gerhard",
"middle": [],
"last": "J\u00e4ger",
"suffix": ""
}
],
"year": 2015,
"venue": "Sciences",
"volume": "112",
"issue": "41",
"pages": "12752--12757",
"other_ids": {
"DOI": [
"10.1073/pnas.1500331112"
]
},
"num": null,
"urls": [],
"raw_text": "Gerhard J\u00e4ger. 2015. Support for linguistic macro- families from weighted sequence alignment. Pro- ceedings of the National Academy of Sciences, 112(41):12752-12757.",
"links": null
},
"BIBREF12": {
"ref_id": "b12",
"title": "Google's multilingual neural machine translation system: Enabling zero-shot translation",
"authors": [
{
"first": "Melvin",
"middle": [],
"last": "Johnson",
"suffix": ""
},
{
"first": "Mike",
"middle": [],
"last": "Schuster",
"suffix": ""
},
{
"first": "Quoc",
"middle": [
"V"
],
"last": "Le",
"suffix": ""
},
{
"first": "Maxim",
"middle": [],
"last": "Krikun",
"suffix": ""
},
{
"first": "Yonghui",
"middle": [],
"last": "Wu",
"suffix": ""
},
{
"first": "Zhifeng",
"middle": [],
"last": "Chen",
"suffix": ""
},
{
"first": "Nikhil",
"middle": [],
"last": "Thorat",
"suffix": ""
},
{
"first": "Fernanda",
"middle": [],
"last": "Vi\u00e9gas",
"suffix": ""
},
{
"first": "Martin",
"middle": [],
"last": "Wattenberg",
"suffix": ""
},
{
"first": "Greg",
"middle": [],
"last": "Corrado",
"suffix": ""
},
{
"first": "Macduff",
"middle": [],
"last": "Hughes",
"suffix": ""
},
{
"first": "Jeffrey",
"middle": [],
"last": "Dean",
"suffix": ""
}
],
"year": 2017,
"venue": "Transactions of the Association for Computational Linguistics",
"volume": "5",
"issue": "",
"pages": "339--351",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Melvin Johnson, Mike Schuster, Quoc V. Le, Maxim Krikun, Yonghui Wu, Zhifeng Chen, Nikhil Thorat, Fernanda Vi\u00e9gas, Martin Wattenberg, Greg Corrado, Macduff Hughes, and Jeffrey Dean. 2017. Google's multilingual neural machine translation system: En- abling zero-shot translation. Transactions of the As- sociation for Computational Linguistics, 5:339-351.",
"links": null
},
"BIBREF13": {
"ref_id": "b13",
"title": "Distributed representations of words and phrases and their compositionality",
"authors": [
{
"first": "Tomas",
"middle": [],
"last": "Mikolov",
"suffix": ""
},
{
"first": "Ilya",
"middle": [],
"last": "Sutskever",
"suffix": ""
},
{
"first": "Kai",
"middle": [],
"last": "Chen",
"suffix": ""
},
{
"first": "Gregory",
"middle": [
"S"
],
"last": "Corrado",
"suffix": ""
},
{
"first": "Jeffrey",
"middle": [],
"last": "Dean",
"suffix": ""
}
],
"year": 2013,
"venue": "Advances in Neural Information Processing Systems 26: 27th Annual Conference on Neural Information Processing Systems 2013. Proceedings of a meeting held",
"volume": "",
"issue": "",
"pages": "3111--3119",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Tomas Mikolov, Ilya Sutskever, Kai Chen, Gregory S. Corrado, and Jeffrey Dean. 2013. Distributed rep- resentations of words and phrases and their compo- sitionality. In Advances in Neural Information Pro- cessing Systems 26: 27th Annual Conference on Neu- ral Information Processing Systems 2013. Proceed- ings of a meeting held December 5-8, 2013, Lake Tahoe, Nevada, United States., pages 3111-3119.",
"links": null
},
"BIBREF14": {
"ref_id": "b14",
"title": "Selective sharing for multilingual dependency parsing",
"authors": [
{
"first": "Tahira",
"middle": [],
"last": "Naseem",
"suffix": ""
},
{
"first": "Regina",
"middle": [],
"last": "Barzilay",
"suffix": ""
},
{
"first": "Amir",
"middle": [],
"last": "Globerson",
"suffix": ""
}
],
"year": 2012,
"venue": "Proceedings of the 50th Annual Meeting of the Association for Computational Linguistics: Long Papers",
"volume": "1",
"issue": "",
"pages": "629--637",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Tahira Naseem, Regina Barzilay, and Amir Globerson. 2012. Selective sharing for multilingual dependency parsing. In Proceedings of the 50th Annual Meet- ing of the Association for Computational Linguis- tics: Long Papers -Volume 1, ACL '12, pages 629- 637, Stroudsburg, PA, USA. Association for Compu- tational Linguistics.",
"links": null
},
"BIBREF15": {
"ref_id": "b15",
"title": "Faculty of Mathematics and Physics",
"authors": [
{
"first": "Katar\u00edna",
"middle": [],
"last": "Gajdo\u0161ov\u00e1",
"suffix": ""
},
{
"first": "Daniel",
"middle": [],
"last": "Galbraith",
"suffix": ""
},
{
"first": "Marcos",
"middle": [],
"last": "Garcia",
"suffix": ""
},
{
"first": "Moa",
"middle": [],
"last": "G\u00e4rdenfors",
"suffix": ""
},
{
"first": "Kim",
"middle": [],
"last": "Gerdes",
"suffix": ""
},
{
"first": "Filip",
"middle": [],
"last": "Ginter",
"suffix": ""
},
{
"first": "Iakes",
"middle": [],
"last": "Goenaga",
"suffix": ""
},
{
"first": "Koldo",
"middle": [],
"last": "Gojenola",
"suffix": ""
},
{
"first": "Memduh",
"middle": [],
"last": "G\u00f6k\u0131rmak",
"suffix": ""
},
{
"first": "Yoav",
"middle": [],
"last": "Goldberg",
"suffix": ""
},
{
"first": "Xavier",
"middle": [
"G\u00f3mez"
],
"last": "Guinovart",
"suffix": ""
},
{
"first": "Berta",
"middle": [
"Gonz\u00e1les"
],
"last": "Saavedra",
"suffix": ""
},
{
"first": "Matias",
"middle": [],
"last": "Grioni",
"suffix": ""
},
{
"first": "Normunds",
"middle": [],
"last": "Gr\u016bz\u012btis",
"suffix": ""
},
{
"first": "Bruno",
"middle": [],
"last": "Guillaume",
"suffix": ""
},
{
"first": "C\u00e9line",
"middle": [],
"last": "Guillot-Barbance",
"suffix": ""
},
{
"first": "Nizar",
"middle": [],
"last": "Habash",
"suffix": ""
},
{
"first": "Jan",
"middle": [],
"last": "Haji\u010d",
"suffix": ""
},
{
"first": "Jan",
"middle": [],
"last": "Haji\u010d Jr",
"suffix": ""
},
{
"first": "Na-Rae",
"middle": [],
"last": "Linh H\u00e0 My",
"suffix": ""
},
{
"first": "Kim",
"middle": [],
"last": "Han",
"suffix": ""
},
{
"first": "Dag",
"middle": [],
"last": "Harris",
"suffix": ""
},
{
"first": "Barbora",
"middle": [],
"last": "Haug",
"suffix": ""
},
{
"first": "Jaroslava",
"middle": [],
"last": "Hladk\u00e1",
"suffix": ""
},
{
"first": "Florinel",
"middle": [],
"last": "Hlav\u00e1\u010dov\u00e1",
"suffix": ""
},
{
"first": "Petter",
"middle": [],
"last": "Hociung",
"suffix": ""
},
{
"first": "Jena",
"middle": [],
"last": "Hohle",
"suffix": ""
},
{
"first": "Radu",
"middle": [],
"last": "Hwang",
"suffix": ""
},
{
"first": "Elena",
"middle": [],
"last": "Ion",
"suffix": ""
},
{
"first": "Tom\u00e1\u0161",
"middle": [],
"last": "Irimia",
"suffix": ""
},
{
"first": "Anders",
"middle": [],
"last": "Jel\u00ednek",
"suffix": ""
},
{
"first": "Fredrik",
"middle": [],
"last": "Johannsen",
"suffix": ""
},
{
"first": "H\u00fcner",
"middle": [],
"last": "J\u00f8rgensen",
"suffix": ""
},
{
"first": "",
"middle": [],
"last": "Ka\u015f\u0131kara",
"suffix": ""
},
{
"first": "Hiroshi",
"middle": [],
"last": "Sylvain Kahane",
"suffix": ""
},
{
"first": "Jenna",
"middle": [],
"last": "Kanayama",
"suffix": ""
},
{
"first": "Tolga",
"middle": [],
"last": "Kanerva",
"suffix": ""
},
{
"first": "V\u00e1clava",
"middle": [],
"last": "Kayadelen",
"suffix": ""
},
{
"first": "Jesse",
"middle": [],
"last": "Kettnerov\u00e1",
"suffix": ""
},
{
"first": "Natalia",
"middle": [],
"last": "Kirchner",
"suffix": ""
},
{
"first": "Simon",
"middle": [],
"last": "Kotsyba",
"suffix": ""
},
{
"first": "Sookyoung",
"middle": [],
"last": "Krek",
"suffix": ""
},
{
"first": "Veronika",
"middle": [],
"last": "Kwak",
"suffix": ""
},
{
"first": "Lorenzo",
"middle": [],
"last": "Laippala",
"suffix": ""
},
{
"first": "Tatiana",
"middle": [],
"last": "Lambertino",
"suffix": ""
},
{
"first": "",
"middle": [],
"last": "Lando",
"suffix": ""
},
{
"first": "Alexei",
"middle": [],
"last": "Septina Dian Larasati",
"suffix": ""
},
{
"first": "John",
"middle": [],
"last": "Lavrentiev",
"suffix": ""
},
{
"first": "",
"middle": [],
"last": "Lee",
"suffix": ""
},
{
"first": "Alessandro",
"middle": [],
"last": "Ph\u01b0\u01a1ng L\u00ea H\u00f4\u01f9g",
"suffix": ""
},
{
"first": "Saran",
"middle": [],
"last": "Lenci",
"suffix": ""
},
{
"first": "Herman",
"middle": [],
"last": "Lertpradit",
"suffix": ""
},
{
"first": "",
"middle": [],
"last": "Leung",
"suffix": ""
},
{
"first": "Ying",
"middle": [],
"last": "Cheuk",
"suffix": ""
},
{
"first": "Josie",
"middle": [],
"last": "Li",
"suffix": ""
},
{
"first": "Keying",
"middle": [],
"last": "Li",
"suffix": ""
},
{
"first": "Kyungtae",
"middle": [],
"last": "Li",
"suffix": ""
},
{
"first": "Nikola",
"middle": [],
"last": "Lim",
"suffix": ""
},
{
"first": "Olga",
"middle": [],
"last": "Ljube\u0161i\u0107",
"suffix": ""
},
{
"first": "Olga",
"middle": [],
"last": "Loginova",
"suffix": ""
},
{
"first": "Teresa",
"middle": [],
"last": "Lyashevskaya",
"suffix": ""
},
{
"first": "Vivien",
"middle": [],
"last": "Lynn",
"suffix": ""
},
{
"first": "Aibek",
"middle": [],
"last": "Macketanz",
"suffix": ""
},
{
"first": "Michael",
"middle": [],
"last": "Makazhanov",
"suffix": ""
},
{
"first": "Christopher",
"middle": [],
"last": "Mandl",
"suffix": ""
},
{
"first": "Ruli",
"middle": [],
"last": "Manning",
"suffix": ""
},
{
"first": "C\u0103t\u0103lina",
"middle": [],
"last": "Manurung",
"suffix": ""
},
{
"first": "David",
"middle": [],
"last": "M\u0103r\u0103nduc",
"suffix": ""
},
{
"first": "Katrin",
"middle": [],
"last": "Mare\u010dek",
"suffix": ""
},
{
"first": "",
"middle": [],
"last": "Marheinecke",
"suffix": ""
},
{
"first": "Andr\u00e9",
"middle": [],
"last": "H\u00e9ctor Mart\u00ednez Alonso",
"suffix": ""
},
{
"first": "Jan",
"middle": [],
"last": "Martins",
"suffix": ""
},
{
"first": "Yuji",
"middle": [],
"last": "Ma\u0161ek",
"suffix": ""
},
{
"first": "Ryan",
"middle": [],
"last": "Matsumoto",
"suffix": ""
},
{
"first": "Gustavo",
"middle": [],
"last": "Mcdonald",
"suffix": ""
},
{
"first": "Niko",
"middle": [],
"last": "Mendon\u00e7a",
"suffix": ""
},
{
"first": "Anna",
"middle": [],
"last": "Miekka",
"suffix": ""
},
{
"first": "C\u0103t\u0103lin",
"middle": [],
"last": "Missil\u00e4",
"suffix": ""
},
{
"first": "Yusuke",
"middle": [],
"last": "Mititelu",
"suffix": ""
},
{
"first": "Simonetta",
"middle": [],
"last": "Miyao",
"suffix": ""
},
{
"first": "Amir",
"middle": [],
"last": "Montemagni",
"suffix": ""
},
{
"first": "Laura",
"middle": [
"Moreno"
],
"last": "More",
"suffix": ""
},
{
"first": "Shinsuke",
"middle": [],
"last": "Romero",
"suffix": ""
},
{
"first": "Bjartur",
"middle": [],
"last": "Mori",
"suffix": ""
},
{
"first": "Bohdan",
"middle": [],
"last": "Mortensen",
"suffix": ""
},
{
"first": "Kadri",
"middle": [],
"last": "Moskalevskyi",
"suffix": ""
},
{
"first": "Yugo",
"middle": [],
"last": "Muischnek",
"suffix": ""
},
{
"first": "Kaili",
"middle": [],
"last": "Murawaki",
"suffix": ""
},
{
"first": "Pinkey",
"middle": [],
"last": "M\u00fc\u00fcrisep",
"suffix": ""
},
{
"first": "Juan Ignacio Navarro",
"middle": [],
"last": "Nainwani",
"suffix": ""
},
{
"first": "Anna",
"middle": [],
"last": "Hor\u00f1iacek",
"suffix": ""
},
{
"first": "Gunta",
"middle": [],
"last": "Nedoluzhko",
"suffix": ""
},
{
"first": "L\u01b0\u01a1ng",
"middle": [],
"last": "Ne\u0161pore-B\u0113rzkalne",
"suffix": ""
},
{
"first": "Huy\u00ea\u01f9 Nguy\u00ea\u00f1 Th\u1ecb",
"middle": [],
"last": "Nguy\u00ea\u00f1 Th\u1ecb",
"suffix": ""
},
{
"first": "Vitaly",
"middle": [],
"last": "Minh",
"suffix": ""
},
{
"first": "Rattima",
"middle": [],
"last": "Nikolaev",
"suffix": ""
},
{
"first": "Hanna",
"middle": [],
"last": "Nitisaroj",
"suffix": ""
},
{
"first": "Stina",
"middle": [],
"last": "Nurmi",
"suffix": ""
},
{
"first": "",
"middle": [],
"last": "Ojala",
"suffix": ""
},
{
"first": "Mai",
"middle": [],
"last": "Ad\u00e9day\u1ecd\u0300ol\u00fa\u00f2kun",
"suffix": ""
},
{
"first": "Petya",
"middle": [],
"last": "Omura",
"suffix": ""
},
{
"first": "Robert",
"middle": [],
"last": "Osenova",
"suffix": ""
},
{
"first": "Lilja",
"middle": [],
"last": "\u00d6stling",
"suffix": ""
},
{
"first": "Niko",
"middle": [],
"last": "\u00d8vrelid",
"suffix": ""
},
{
"first": "Elena",
"middle": [],
"last": "Partanen",
"suffix": ""
},
{
"first": "Marco",
"middle": [],
"last": "Pascual",
"suffix": ""
},
{
"first": "Agnieszka",
"middle": [],
"last": "Passarotti",
"suffix": ""
},
{
"first": "Siyao",
"middle": [],
"last": "Patejuk",
"suffix": ""
},
{
"first": "Cenel-Augusto",
"middle": [],
"last": "Peng",
"suffix": ""
},
{
"first": "Guy",
"middle": [],
"last": "Perez",
"suffix": ""
},
{
"first": "Georg",
"middle": [],
"last": "Perrier ; Siva Reddy",
"suffix": ""
},
{
"first": "Michael",
"middle": [],
"last": "Rehm",
"suffix": ""
},
{
"first": "Larissa",
"middle": [],
"last": "Rie\u00dfler",
"suffix": ""
},
{
"first": "Laura",
"middle": [],
"last": "Rinaldi",
"suffix": ""
},
{
"first": "Luisa",
"middle": [],
"last": "Rituma",
"suffix": ""
},
{
"first": "Mykhailo",
"middle": [],
"last": "Rocha",
"suffix": ""
},
{
"first": "Rudolf",
"middle": [],
"last": "Romanenko",
"suffix": ""
},
{
"first": "Davide",
"middle": [],
"last": "Rosa",
"suffix": ""
},
{
"first": "Valentin",
"middle": [],
"last": "Rovati",
"suffix": ""
},
{
"first": "",
"middle": [],
"last": "Ro\u0219ca",
"suffix": ""
}
],
"year": 2018,
"venue": "",
"volume": "",
"issue": "",
"pages": "",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Katar\u00edna Gajdo\u0161ov\u00e1, Daniel Galbraith, Marcos Garcia, Moa G\u00e4rdenfors, Kim Gerdes, Filip Gin- ter, Iakes Goenaga, Koldo Gojenola, Memduh G\u00f6k\u0131rmak, Yoav Goldberg, Xavier G\u00f3mez Guino- vart, Berta Gonz\u00e1les Saavedra, Matias Grioni, Normunds Gr\u016bz\u012btis, Bruno Guillaume, C\u00e9line Guillot-Barbance, Nizar Habash, Jan Haji\u010d, Jan Haji\u010d jr., Linh H\u00e0 My, Na-Rae Han, Kim Harris, Dag Haug, Barbora Hladk\u00e1, Jaroslava Hlav\u00e1\u010dov\u00e1, Florinel Hociung, Petter Hohle, Jena Hwang, Radu Ion, Elena Irimia, Tom\u00e1\u0161 Jel\u00ednek, Anders Johannsen, Fredrik J\u00f8rgensen, H\u00fcner Ka\u015f\u0131kara, Sylvain Kahane, Hiroshi Kanayama, Jenna Kanerva, Tolga Kayade- len, V\u00e1clava Kettnerov\u00e1, Jesse Kirchner, Natalia Kotsyba, Simon Krek, Sookyoung Kwak, Veronika Laippala, Lorenzo Lambertino, Tatiana Lando, Septina Dian Larasati, Alexei Lavrentiev, John Lee, Ph\u01b0\u01a1ng L\u00ea H\u00f4\u01f9g, Alessandro Lenci, Saran Lertpradit, Herman Leung, Cheuk Ying Li, Josie Li, Keying Li, KyungTae Lim, Nikola Ljube\u0161i\u0107, Olga Loginova, Olga Lyashevskaya, Teresa Lynn, Vivien Macketanz, Aibek Makazhanov, Michael Mandl, Christopher Manning, Ruli Manurung, C\u0103t\u0103lina M\u0103r\u0103nduc, David Mare\u010dek, Katrin Marheinecke, H\u00e9ctor Mart\u00ednez Alonso, Andr\u00e9 Martins, Jan Ma\u0161ek, Yuji Matsumoto, Ryan McDonald, Gustavo Mendon\u00e7a, Niko Miekka, Anna Missil\u00e4, C\u0103t\u0103lin Mititelu, Yusuke Miyao, Simonetta Montemagni, Amir More, Laura Moreno Romero, Shinsuke Mori, Bjartur Mortensen, Bohdan Moskalevskyi, Kadri Muischnek, Yugo Murawaki, Kaili M\u00fc\u00fcrisep, Pinkey Nainwani, Juan Ignacio Navarro Hor\u00f1iacek, Anna Nedoluzhko, Gunta Ne\u0161pore-B\u0113rzkalne, L\u01b0\u01a1ng Nguy\u00ea\u00f1 Th\u1ecb, Huy\u00ea\u01f9 Nguy\u00ea\u00f1 Th\u1ecb Minh, Vitaly Nikolaev, Rattima Nitisaroj, Hanna Nurmi, Stina Ojala, Ad\u00e9day\u1ecd\u0300Ol\u00fa\u00f2kun, Mai 
Omura, Petya Osenova, Robert \u00d6stling, Lilja \u00d8vrelid, Niko Partanen, Elena Pascual, Marco Passarotti, Agnieszka Patejuk, Siyao Peng, Cenel-Augusto Perez, Guy Perrier, Slav Petrov, Jussi Piitulainen, Emily Pitler, Barbara Plank, Thierry Poibeau, Martin Popel, Lauma Pretkalni\u0146a, Sophie Pr\u00e9vost, Prokopis Prokopidis, Adam Przepi\u00f3rkowski, Tiina Puolakainen, Sampo Pyysalo, Andriela R\u00e4\u00e4bis, Alexandre Rademaker, Loganathan Ramasamy, Taraka Rama, Carlos Ramisch, Vinit Ravishankar, Livy Real, Siva Reddy, Georg Rehm, Michael Rie\u00dfler, Larissa Rinaldi, Laura Rituma, Luisa Rocha, Mykhailo Romanenko, Rudolf Rosa, Da- vide Rovati, Valentin Ro\u0219ca, Olga Rudina, Shoval Sadde, Shadi Saleh, Tanja Samard\u017ei\u0107, Stephanie Samson, Manuela Sanguinetti, Baiba Saul\u012bte, Yanin Sawanakunanon, Nathan Schneider, Sebas- tian Schuster, Djam\u00e9 Seddah, Wolfgang Seeker, Mojgan Seraji, Mo Shen, Atsuko Shimada, Muh Shohibussirri, Dmitry Sichinava, Natalia Silveira, Maria Simi, Radu Simionescu, Katalin Simk\u00f3, M\u00e1ria \u0160imkov\u00e1, Kiril Simov, Aaron Smith, Isabela Soares-Bastos, Antonio Stella, Milan Straka, Jana Strnadov\u00e1, Alane Suhr, Umut Sulubacak, Zsolt Sz\u00e1nt\u00f3, Dima Taji, Yuta Takahashi, Takaaki Tanaka, Isabelle Tellier, Trond Trosterud, Anna Trukhina, Reut Tsarfaty, Francis Tyers, Sumire Uematsu, Zde\u0148ka Ure\u0161ov\u00e1, Larraitz Uria, Hans Uszkoreit, Sowmya Vajjala, Daniel van Niekerk, Gertjan van Noord, Viktor Varga, Veronika Vincze, Lars Wallin, Jonathan North Washington, Seyi Williams, Mats Wir\u00e9n, Tsegay Woldemariam, Tak-sum Wong, Chunxiao Yan, Marat M. Yavrumyan, Zhuoran Yu, Zden\u011bk \u017dabokrtsk\u00fd, Amir Zeldes, Daniel Zeman, Manying Zhang, and Hanzhi Zhu. 2018. Universal dependencies 2.2. LINDAT/CLARIN digital library at the Institute of Formal and Applied Linguistics (\u00daFAL), Faculty of Mathematics and Physics, Charles University.",
"links": null
},
"BIBREF16": {
"ref_id": "b16",
"title": "Empirically sampling universal dependencies",
"authors": [
{
"first": "Natalie",
"middle": [],
"last": "Schluter",
"suffix": ""
},
{
"first": "\u017deljko",
"middle": [],
"last": "Agi\u0107",
"suffix": ""
}
],
"year": 2017,
"venue": "Proceedings of the NoDaLiDa 2017 Workshop on Universal Dependencies",
"volume": "",
"issue": "",
"pages": "117--122",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Natalie Schluter and \u017deljko Agi\u0107. 2017. Empirically sampling universal dependencies. In Proceedings of the NoDaLiDa 2017 Workshop on Universal Depen- dencies (UDW 2017), pages 117-122.",
"links": null
},
"BIBREF17": {
"ref_id": "b17",
"title": "Ethnologue: Languages of the World, Twenty-first edition",
"authors": [
{
"first": "F",
"middle": [],
"last": "Gary",
"suffix": ""
},
{
"first": "Charles",
"middle": [
"D"
],
"last": "Simons",
"suffix": ""
},
{
"first": "",
"middle": [],
"last": "Fennig",
"suffix": ""
}
],
"year": 2018,
"venue": "SIL International",
"volume": "",
"issue": "",
"pages": "",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Gary F. Simons and Charles D. Fennig, editors. 2018. Ethnologue: Languages of the World, Twenty-first edition. SIL International, Dallas, TX, USA.",
"links": null
},
"BIBREF18": {
"ref_id": "b18",
"title": "Deep multitask learning with low level tasks supervised at lower layers",
"authors": [
{
"first": "Anders",
"middle": [],
"last": "S\u00f8gaard",
"suffix": ""
},
{
"first": "Yoav",
"middle": [],
"last": "Goldberg",
"suffix": ""
}
],
"year": 2016,
"venue": "Proceedings of the 54th Annual Meeting of the Association for Computational Linguistics, ACL 2016",
"volume": "2",
"issue": "",
"pages": "",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Anders S\u00f8gaard and Yoav Goldberg. 2016. Deep multi- task learning with low level tasks supervised at lower layers. In Proceedings of the 54th Annual Meeting of the Association for Computational Linguistics, ACL 2016, August 7-12, 2016, Berlin, Germany, Volume 2: Short Papers.",
"links": null
},
"BIBREF19": {
"ref_id": "b19",
"title": "Parser training with heterogeneous treebanks",
"authors": [
{
"first": "Sara",
"middle": [],
"last": "Stymne",
"suffix": ""
},
{
"first": "Aaron",
"middle": [],
"last": "Miryam De Lhoneux",
"suffix": ""
},
{
"first": "Joakim",
"middle": [],
"last": "Smith",
"suffix": ""
},
{
"first": "",
"middle": [],
"last": "Nivre",
"suffix": ""
}
],
"year": 2018,
"venue": "Proceedings of the 56th Annual Meeting of the Association for Computational Linguistics",
"volume": "2",
"issue": "",
"pages": "619--625",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Sara Stymne, Miryam de Lhoneux, Aaron Smith, and Joakim Nivre. 2018. Parser training with hetero- geneous treebanks. In Proceedings of the 56th An- nual Meeting of the Association for Computational Linguistics (Volume 2: Short Papers), pages 619- 625, Melbourne, Australia. Association for Compu- tational Linguistics.",
"links": null
}
},
"ref_entries": {
"FIGREF0": {
"uris": null,
"num": null,
"type_str": "figure",
"text": "A possible phylogenetic tree for languages in the Slavic family."
},
"FIGREF1": {
"uris": null,
"num": null,
"type_str": "figure",
"text": "Bi-LSTM architecture for character based word representation. The final representation is the concatenation of the final cells of each layer."
},
"FIGREF2": {
"uris": null,
"num": null,
"type_str": "figure",
"text": "Neural network architecture for edge scoring. The contextualised representation of the governor (eat) and the dependent (Cats) are concatenated and passed through a rectified linear layer and a final plain linear layer to get a vector of label scores."
},
"TABREF2": {
"type_str": "table",
"html": null,
"text": "",
"content": "<table><tr><td>: Parsing results for languages with a training set</td></tr><tr><td>for phylogenetic models and independent models. The</td></tr><tr><td>training set size of languages without a developpement</td></tr><tr><td>set are reported in brackets.</td></tr></table>",
"num": null
},
"TABREF3": {
"type_str": "table",
"html": null,
"text": "Accuracy of languages without a training set.",
"content": "<table/>",
"num": null
}
}
}
}