| { |
| "paper_id": "2020", |
| "header": { |
| "generated_with": "S2ORC 1.0.0", |
| "date_generated": "2023-01-19T15:31:11.843122Z" |
| }, |
| "title": "SIGMORPHON 2020 Task 0 System Description ETH Z\u00fcrich Team", |
| "authors": [ |
| { |
| "first": "Martina", |
| "middle": [], |
| "last": "Forster", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "ETH Z\u00fcrich", |
| "location": {} |
| }, |
| "email": "" |
| }, |
| { |
| "first": "Clara", |
| "middle": [], |
| "last": "Meister", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "ETH Z\u00fcrich", |
| "location": {} |
| }, |
| "email": "clara.meister@inf.ethz.ch" |
| } |
| ], |
| "year": "", |
| "venue": null, |
| "identifiers": {}, |
| "abstract": "This paper presents our system for the SIGMORPHON 2020 Shared Task. We build off of the baseline systems, performing exact inference on models trained on language family data. Our systems return the globally best solution under these models. Our two systems achieve 80.9% and 75.6% accuracy on the test set. We ultimately find that, in this setting, exact inference does not seem to help or hinder the performance of morphological inflection generators, which stands in contrast to its effect on Neural Machine Translation (NMT) models.", |
| "pdf_parse": { |
| "paper_id": "2020", |
| "_pdf_hash": "", |
| "abstract": [ |
| { |
| "text": "This paper presents our system for the SIGMORPHON 2020 Shared Task. We build off of the baseline systems, performing exact inference on models trained on language family data. Our systems return the globally best solution under these models. Our two systems achieve 80.9% and 75.6% accuracy on the test set. We ultimately find that, in this setting, exact inference does not seem to help or hinder the performance of morphological inflection generators, which stands in contrast to its effect on Neural Machine Translation (NMT) models.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Abstract", |
| "sec_num": null |
| } |
| ], |
| "body_text": [ |
| { |
| "text": "Morphological inflection generation is the task of generating a specific word form given a lemma and a set of morphological tags. It has a wide range of applications-in particular, it can be useful for morphologically rich, but low-resource languages. If a language has complex morphology, but only scarce data are available, vocabulary coverage is often poor. In such cases, morphological inflection can be used to generate additional word forms for training data.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "Typologically diverse morphological inflection is the focus of task 0 of the SIGMORPHON Shared Tasks (Vylomova et al., 2020) , to which we submit this system. Specifically, the task requires the aforementioned transformation from lemma and morphological tags to inflected form. A main challenge of the task is that it covers a typologically diverse set of languages, i.e. languages have a wide range of structural patterns and features. Additionally, for a portion of these languages, only scant resources are available.", |
| "cite_spans": [ |
| { |
| "start": 101, |
| "end": 124, |
| "text": "(Vylomova et al., 2020)", |
| "ref_id": "BIBREF7" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "Our approach is to train models on language families rather than solely on individual languages. This strategy should help us overcome the problems frequently encountered for low-resource tasks, e.g., overfitting, by increasing the amount of training data used for each model. The strategy is viable due to the typological similarities between languages within the same family. We combine two of the neural baseline architectures provided by the task organizers, a multilingual Transformer (Wu et al., 2020) and a (neuralized) hidden Markov model with hard monotonic attention (Wu and Cotterell, 2019) , albeit with a different decoding strategy: we perform exact inference, returning the globally optimal solution under the model.", |
| "cite_spans": [ |
| { |
| "start": 490, |
| "end": 507, |
| "text": "(Wu et al., 2020)", |
| "ref_id": "BIBREF9" |
| }, |
| { |
| "start": 577, |
| "end": 601, |
| "text": "(Wu and Cotterell, 2019)", |
| "ref_id": "BIBREF8" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "Neural character-to-character transducers (Faruqui et al., 2016; Kann and Sch\u00fctze, 2016 ) define a probability distribution p \u03b8 (y | x), where \u03b8 is a set of weights learned by a neural network and x and y are inputs and (possible) outputs, respectively. In the case of morphological inflection, x represents the lemma we are trying to inflect and the morphosyntactic description (MSDs) indicating the inflection we desire; y is then a candidate inflected form of the lemma from the set of all valid character sequences Y. Note that valid character sequences are padded with distinguished tokens, BOS and EOS, indicating the beginning and end of the sequence.", |
| "cite_spans": [ |
| { |
| "start": 42, |
| "end": 64, |
| "text": "(Faruqui et al., 2016;", |
| "ref_id": "BIBREF0" |
| }, |
| { |
| "start": 65, |
| "end": 87, |
| "text": "Kann and Sch\u00fctze, 2016", |
| "ref_id": "BIBREF1" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Background", |
| "sec_num": "2" |
| }, |
| { |
| "text": "The neural character-to-character transducers we consider in this work are locally normalized. Specifically, the model p \u03b8 is a probability distribution over the set of possible characters which models p \u03b8 (\u2022 | x, y <t ) for any time step t. By the chain rule of probability, p \u03b8 (y | x) decomposes as", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Background", |
| "sec_num": "2" |
| }, |
| { |
| "text": "EQUATION", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [ |
| { |
| "start": 0, |
| "end": 8, |
| "text": "EQUATION", |
| "ref_id": "EQREF", |
| "raw_str": "p \u03b8 (y | x) = |y| t=1 p \u03b8 (y t | x, y <t )", |
| "eq_num": "(1)" |
| } |
| ], |
| "section": "Background", |
| "sec_num": "2" |
| }, |
| { |
| "text": "The decoding objective then aims to find the most probable sequence among all valid sequences:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Background", |
| "sec_num": "2" |
| }, |
| { |
| "text": "EQUATION", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [ |
| { |
| "start": 0, |
| "end": 8, |
| "text": "EQUATION", |
| "ref_id": "EQREF", |
| "raw_str": "y = argmax y\u2208Y log p \u03b8 (y | x)", |
| "eq_num": "(2)" |
| } |
| ], |
| "section": "Background", |
| "sec_num": "2" |
| }, |
| { |
| "text": "This is known as maximum a posteriori (MAP) decoding. While the above optimization problem implies that we find the global optimum y , we often only perform a heuristic search, e.g., beam search, since performing exact search can be quite computationally expensive due to the size of Y and the dependency of p \u03b8 (\u2022 | x, y <t ) on all previous output tokens. For neural machine translation (NMT) specifically, while beam search often yields better results than greedy search, translation quality almost always decreases for beam sizes larger than 5. We refer the interested reader to the large number of works that have studied this phenomenon in detail (Koehn and Knowles, 2017; Murray and Chiang, 2018; Yang et al., 2018; Stahlberg and Byrne, 2019) . Exact decoding effectively stretches the beam size to infinity (i.e. does not limit it), finding the globally best solution. While the effects of exact decoding have been explored for neural machine translation (Stahlberg and Byrne, 2019) , to the best of our knowledge, they have not yet been explored for morphological inflection generation. This is a natural research question as the architectures of morphological inflection generation systems are often based off of those for NMT.", |
| "cite_spans": [ |
| { |
| "start": 653, |
| "end": 678, |
| "text": "(Koehn and Knowles, 2017;", |
| "ref_id": "BIBREF3" |
| }, |
| { |
| "start": 679, |
| "end": 703, |
| "text": "Murray and Chiang, 2018;", |
| "ref_id": "BIBREF4" |
| }, |
| { |
| "start": 704, |
| "end": 722, |
| "text": "Yang et al., 2018;", |
| "ref_id": "BIBREF10" |
| }, |
| { |
| "start": 723, |
| "end": 749, |
| "text": "Stahlberg and Byrne, 2019)", |
| "ref_id": "BIBREF5" |
| }, |
| { |
| "start": 963, |
| "end": 990, |
| "text": "(Stahlberg and Byrne, 2019)", |
| "ref_id": "BIBREF5" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Background", |
| "sec_num": "2" |
| }, |
| { |
| "text": "We use the data provided by the SIGMORPHON 2020 shared task, which features lemmas, inflections, and corresponding MSDs (following unimorph schema (Kirov et al., 2018) ) for 90 languages in total. Data was released in two phases; the first phase included languages from five families: Austronesian, Niger-Congo, Uralic, Oto-Manguean, and Indo-European. Data from the second phase included languages belonging to Afro-Asiatic, Algic, Australian, Dravidian, Germanic, Indo-Aryan, Iranian, Niger-Congo, Nilo-Saharan, Romance, Sino-Tibetan, Siouan, Tungusic, Turkic, Uralic, and Uto-Aztecan families. The full list of languages can be found on the task website: https://sigmorphon.github.io/ sharedtasks/2020/task0/.", |
| "cite_spans": [ |
| { |
| "start": 147, |
| "end": 167, |
| "text": "(Kirov et al., 2018)", |
| "ref_id": "BIBREF2" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Data", |
| "sec_num": "3" |
| }, |
| { |
| "text": "Due to scarcity of resources available to the task organizers, many of the languages had only a few morphological forms annotated. For example, Zarma, a Songhay language, had only 56 available inflections in the training set and 9 in the development set.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Data", |
| "sec_num": "3" |
| }, |
| { |
| "text": "Our systems are built using two model architectures provided as baselines by the task organizers: a multilingual Transformer (Wu et al., 2020 ) and a (neuralized) hidden Markov model (HMM) with hard monotonic attention (Wu and Cotterell, 2019) . We then perform exact inference on the models. The following subsections explain the two components separately.", |
| "cite_spans": [ |
| { |
| "start": 125, |
| "end": 141, |
| "text": "(Wu et al., 2020", |
| "ref_id": "BIBREF9" |
| }, |
| { |
| "start": 219, |
| "end": 243, |
| "text": "(Wu and Cotterell, 2019)", |
| "ref_id": "BIBREF8" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "System description", |
| "sec_num": "4" |
| }, |
| { |
| "text": "The architectures of both models exactly follow those of the Transformer and HMM proposed as baselines for the SIGMORPHON 2020 Task 0. We do this in part to create a clear comparison between morphological inflection generation systems that perform inference with exact vs. heuristic decoding strategies.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Model Architectures", |
| "sec_num": "4.1" |
| }, |
| { |
| "text": "We trained HMMs for each language family for a maximum of 50 epochs and Transformers for a maximum of 20000 steps. Early stopping was performed if subsequent validation set losses differed by less than 1e \u2212 3. Batch sizes of 30 and 100, respectively, were used. Other training configurations followed those of the baseline systems.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Model Architectures", |
| "sec_num": "4.1" |
| }, |
| { |
| "text": "Due to the resource scarcity for many of the task's languages, we used entire language families to train models rather than individual languages. Specifically, we aggregated the data from all languages of a given family, using a cross-lingual learning approach. We did not subsequently finetune the models on individual languages. Specifically, we do not do any additional training on individual languages nor do we re-target the vocabulary during decoding. This means generation of invalid characters (i.e. invalid for a specific language) is possible.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Model Architectures", |
| "sec_num": "4.1" |
| }, |
| { |
| "text": "For decoding, we perform exact inference with a search strategy built on top of the SGNMT library (Stahlberg et al., 2017) . Specifically, we use Dijkstra's search algorithm, which provably returns the optimal solution when path scores monotonically decrease with length. From equation 1, we can see that the scoring function for sequences y is monotonically decreasing in t, therefore meeting this criterion. Additionally, to prevent a large memory footprint, we can lower bound the search by the score of the empty string, i.e. stop exploring solutions whose scores become less than the empty string at any point in time. We return the globally best inflection.", |
| "cite_spans": [ |
| { |
| "start": 98, |
| "end": 122, |
| "text": "(Stahlberg et al., 2017)", |
| "ref_id": "BIBREF6" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Decoding", |
| "sec_num": "4.2" |
| }, |
| { |
| "text": "Results on the test data from SIGMORPHON 2020 Task 0 can be found in Table 3 . For comparison purposes, Tables 1 and 2 show the performance of our models with greedy and beam search for a selection of languages.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 69, |
| "end": 76, |
| "text": "Table 3", |
| "ref_id": null |
| }, |
| { |
| "start": 104, |
| "end": 118, |
| "text": "Tables 1 and 2", |
| "ref_id": "TABREF1" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Results on the Shared Task test data", |
| "sec_num": "5" |
| }, |
| { |
| "text": "The results in Table 3 indicate that the HMM performed better in combination with exact decoding than the Transformer. On average over the 90 languages, the HMM achieved an accuracy of 80.9% in comparison to only 75.6% for the Transformer. Performance by Levenshtein distance looks similar: the average Levenshtein distances were 0.5 and 0.62 for the HMM and Transformer, respectively. A particularly interesting language to study in this scenario is Zarma (dje), which only has 56 samples in the training set, 9 samples in the development set and 16 samples in the test set. Moreover, it is the only language in its family, Nilo-Saharan. The terrible performance of our system on this language compared with greedy search suggests that low-resource settings may lead to weak performance with exact decoding. Out of the other languages that performed poorly, many were from the Germanic and Uralic family. Poor performance on these languages may stem from the fact that they belong to a family with high-resource languages. As we trained on language family data and did not fine-tune the models, it is possible that lower-resource languages in a high-resource family, which are underrepresented in the training data, are not adequately modelled. In these settings, performance would likely improve noticeably by fine-tuning on the individual languages.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 15, |
| "end": 22, |
| "text": "Table 3", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Discussion", |
| "sec_num": "5.1" |
| }, |
| { |
| "text": "We perform exact inference on two baseline neural architectures for morphological inflection, a Transformer and a (neuralized) hidden Markov model with hard monotonic attention, to find the inflections with the globally best score under the model. On test data, the hidden Markov model showed better results: on average, it achieved 80.9% accuracy and a Levenshtein distance of 0.5, while the Transformer performed worse with 75.6% and 0.62 respectively. Overall, exact decoding of morphological inflection generators does not appear to significantly affect model performance compared with greedy search. This is notable when compared with NMT systems, for which exact search often leads to performance degradation. Table 3 : Accuracy and Levenshtein distance for both of our systems, as well as for the baselines.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 716, |
| "end": 723, |
| "text": "Table 3", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Conclusion", |
| "sec_num": "6" |
| } |
| ], |
| "back_matter": [], |
| "bib_entries": { |
| "BIBREF0": { |
| "ref_id": "b0", |
| "title": "Morphological inflection generation using character sequence to sequence learning", |
| "authors": [ |
| { |
| "first": "Manaal", |
| "middle": [], |
| "last": "Faruqui", |
| "suffix": "" |
| }, |
| { |
| "first": "Yulia", |
| "middle": [], |
| "last": "Tsvetkov", |
| "suffix": "" |
| }, |
| { |
| "first": "Graham", |
| "middle": [], |
| "last": "Neubig", |
| "suffix": "" |
| }, |
| { |
| "first": "Chris", |
| "middle": [], |
| "last": "Dyer", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "Proceedings of the 2016 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies", |
| "volume": "", |
| "issue": "", |
| "pages": "634--643", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/N16-1077" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Manaal Faruqui, Yulia Tsvetkov, Graham Neubig, and Chris Dyer. 2016. Morphological inflection genera- tion using character sequence to sequence learning. In Proceedings of the 2016 Conference of the North American Chapter of the Association for Computa- tional Linguistics: Human Language Technologies, pages 634-643, San Diego, California. Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF1": { |
| "ref_id": "b1", |
| "title": "Singlemodel encoder-decoder with explicit morphological representation for reinflection", |
| "authors": [ |
| { |
| "first": "Katharina", |
| "middle": [], |
| "last": "Kann", |
| "suffix": "" |
| }, |
| { |
| "first": "Hinrich", |
| "middle": [], |
| "last": "Sch\u00fctze", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "Proceedings of the 54th Annual Meeting of the Association for Computational Linguistics", |
| "volume": "2", |
| "issue": "", |
| "pages": "555--560", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/P16-2090" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Katharina Kann and Hinrich Sch\u00fctze. 2016. Single- model encoder-decoder with explicit morphological representation for reinflection. In Proceedings of the 54th Annual Meeting of the Association for Compu- tational Linguistics (Volume 2: Short Papers), pages 555-560, Berlin, Germany. Association for Compu- tational Linguistics.", |
| "links": null |
| }, |
| "BIBREF2": { |
| "ref_id": "b2", |
| "title": "UniMorph 2.0: Universal morphology", |
| "authors": [ |
| { |
| "first": "Christo", |
| "middle": [], |
| "last": "Kirov", |
| "suffix": "" |
| }, |
| { |
| "first": "Ryan", |
| "middle": [], |
| "last": "Cotterell", |
| "suffix": "" |
| }, |
| { |
| "first": "John", |
| "middle": [], |
| "last": "Sylak-Glassman", |
| "suffix": "" |
| }, |
| { |
| "first": "G\u00e9raldine", |
| "middle": [], |
| "last": "Walther", |
| "suffix": "" |
| }, |
| { |
| "first": "Ekaterina", |
| "middle": [], |
| "last": "Vylomova", |
| "suffix": "" |
| }, |
| { |
| "first": "Patrick", |
| "middle": [], |
| "last": "Xia", |
| "suffix": "" |
| }, |
| { |
| "first": "Manaal", |
| "middle": [], |
| "last": "Faruqui", |
| "suffix": "" |
| }, |
| { |
| "first": "Sabrina", |
| "middle": [ |
| "J" |
| ], |
| "last": "Mielke", |
| "suffix": "" |
| }, |
| { |
| "first": "Arya", |
| "middle": [], |
| "last": "Mc-Carthy", |
| "suffix": "" |
| }, |
| { |
| "first": "Sandra", |
| "middle": [], |
| "last": "K\u00fcbler", |
| "suffix": "" |
| }, |
| { |
| "first": "David", |
| "middle": [], |
| "last": "Yarowsky", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "Proceedings of the Eleventh International Conference on Language Resources and Evaluation (LREC 2018)", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Christo Kirov, Ryan Cotterell, John Sylak-Glassman, G\u00e9raldine Walther, Ekaterina Vylomova, Patrick Xia, Manaal Faruqui, Sabrina J. Mielke, Arya Mc- Carthy, Sandra K\u00fcbler, David Yarowsky, Jason Eis- ner, and Mans Hulden. 2018. UniMorph 2.0: Uni- versal morphology. In Proceedings of the Eleventh International Conference on Language Resources and Evaluation (LREC 2018), Miyazaki, Japan. Eu- ropean Language Resources Association (ELRA).", |
| "links": null |
| }, |
| "BIBREF3": { |
| "ref_id": "b3", |
| "title": "Six challenges for neural machine translation", |
| "authors": [ |
| { |
| "first": "Philipp", |
| "middle": [], |
| "last": "Koehn", |
| "suffix": "" |
| }, |
| { |
| "first": "Rebecca", |
| "middle": [], |
| "last": "Knowles", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "Proceedings of the First Workshop on Neural Machine Translation", |
| "volume": "", |
| "issue": "", |
| "pages": "28--39", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/W17-3204" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Philipp Koehn and Rebecca Knowles. 2017. Six chal- lenges for neural machine translation. In Proceed- ings of the First Workshop on Neural Machine Trans- lation, pages 28-39, Vancouver. Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF4": { |
| "ref_id": "b4", |
| "title": "Correcting length bias in neural machine translation", |
| "authors": [ |
| { |
| "first": "Kenton", |
| "middle": [], |
| "last": "Murray", |
| "suffix": "" |
| }, |
| { |
| "first": "David", |
| "middle": [], |
| "last": "Chiang", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "Proceedings of the Third Conference on Machine Translation: Research Papers", |
| "volume": "", |
| "issue": "", |
| "pages": "212--223", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/W18-6322" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Kenton Murray and David Chiang. 2018. Correct- ing length bias in neural machine translation. In Proceedings of the Third Conference on Machine Translation: Research Papers, pages 212-223, Brus- sels, Belgium. Association for Computational Lin- guistics.", |
| "links": null |
| }, |
| "BIBREF5": { |
| "ref_id": "b5", |
| "title": "On NMT search errors and model errors: Cat got your tongue?", |
| "authors": [ |
| { |
| "first": "Felix", |
| "middle": [], |
| "last": "Stahlberg", |
| "suffix": "" |
| }, |
| { |
| "first": "Bill", |
| "middle": [], |
| "last": "Byrne", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "Proceedings of the 2019 Conference on Empirical Methods in Natural Language Processing and the 9th International Joint Conference on Natural Language Processing (EMNLP-IJCNLP)", |
| "volume": "", |
| "issue": "", |
| "pages": "3356--3362", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/D19-1331" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Felix Stahlberg and Bill Byrne. 2019. On NMT search errors and model errors: Cat got your tongue? In Proceedings of the 2019 Conference on Empirical Methods in Natural Language Processing and the 9th International Joint Conference on Natural Lan- guage Processing (EMNLP-IJCNLP), pages 3356- 3362, Hong Kong, China. Association for Computa- tional Linguistics.", |
| "links": null |
| }, |
| "BIBREF6": { |
| "ref_id": "b6", |
| "title": "SGNMT -a flexible NMT decoding platform for quick prototyping of new models and search strategies", |
| "authors": [ |
| { |
| "first": "Felix", |
| "middle": [], |
| "last": "Stahlberg", |
| "suffix": "" |
| }, |
| { |
| "first": "Eva", |
| "middle": [], |
| "last": "Hasler", |
| "suffix": "" |
| }, |
| { |
| "first": "Danielle", |
| "middle": [], |
| "last": "Saunders", |
| "suffix": "" |
| }, |
| { |
| "first": "Bill", |
| "middle": [], |
| "last": "Byrne", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "Proceedings of the 2017 Conference on Empirical Methods in Natural Language Processing: System Demonstrations", |
| "volume": "", |
| "issue": "", |
| "pages": "25--30", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/D17-2005" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Felix Stahlberg, Eva Hasler, Danielle Saunders, and Bill Byrne. 2017. SGNMT -a flexible NMT de- coding platform for quick prototyping of new mod- els and search strategies. In Proceedings of the 2017 Conference on Empirical Methods in Natu- ral Language Processing: System Demonstrations, pages 25-30, Copenhagen, Denmark. Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF7": { |
| "ref_id": "b7", |
| "title": "The SIGMORPHON 2020 Shared Task 0: Typologically diverse morphological inflection", |
| "authors": [ |
| { |
| "first": "Ekaterina", |
| "middle": [], |
| "last": "Vylomova", |
| "suffix": "" |
| }, |
| { |
| "first": "Jennifer", |
| "middle": [], |
| "last": "White", |
| "suffix": "" |
| }, |
| { |
| "first": "Elizabeth", |
| "middle": [], |
| "last": "Salesky", |
| "suffix": "" |
| }, |
| { |
| "first": "Sabrina", |
| "middle": [ |
| "J" |
| ], |
| "last": "Mielke", |
| "suffix": "" |
| }, |
| { |
| "first": "Shijie", |
| "middle": [], |
| "last": "Wu", |
| "suffix": "" |
| }, |
| { |
| "first": "Edoardo", |
| "middle": [], |
| "last": "Ponti", |
| "suffix": "" |
| }, |
| { |
| "first": "Rowan", |
| "middle": [], |
| "last": "Hall Maudslay", |
| "suffix": "" |
| }, |
| { |
| "first": "Ran", |
| "middle": [], |
| "last": "Zmigrod", |
| "suffix": "" |
| }, |
| { |
| "first": "Joseph", |
| "middle": [], |
| "last": "Valvoda", |
| "suffix": "" |
| }, |
| { |
| "first": "Svetlana", |
| "middle": [], |
| "last": "Toldova", |
| "suffix": "" |
| }, |
| { |
| "first": "Francis", |
| "middle": [], |
| "last": "Tyers", |
| "suffix": "" |
| }, |
| { |
| "first": "Elena", |
| "middle": [], |
| "last": "Klyachko", |
| "suffix": "" |
| }, |
| { |
| "first": "Ilya", |
| "middle": [], |
| "last": "Yegorov", |
| "suffix": "" |
| }, |
| { |
| "first": "Natalia", |
| "middle": [], |
| "last": "Krizhanovsky", |
| "suffix": "" |
| }, |
| { |
| "first": "Paula", |
| "middle": [], |
| "last": "Czarnowska", |
| "suffix": "" |
| }, |
| { |
| "first": "Irene", |
| "middle": [], |
| "last": "Nikkarinen", |
| "suffix": "" |
| }, |
| { |
| "first": "Andrej", |
| "middle": [], |
| "last": "Krizhanovsky", |
| "suffix": "" |
| }, |
| { |
| "first": "Tiago", |
| "middle": [], |
| "last": "Pimentel", |
| "suffix": "" |
| }, |
| { |
| "first": "Lucas", |
| "middle": [], |
| "last": "Torroba Hennigen", |
| "suffix": "" |
| }, |
| { |
| "first": "Christo", |
| "middle": [], |
| "last": "Kirov", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "Proceedings of the 17th SIGMORPHON Workshop on Computational Research in Phonetics, Phonology, and Morphology", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Ekaterina Vylomova, Jennifer White, Elizabeth Salesky, Sabrina J. Mielke, Shijie Wu, Edoardo Ponti, Rowan Hall Maudslay, Ran Zmigrod, Joseph Valvoda, Svetlana Toldova, Francis Tyers, Elena Klyachko, Ilya Yegorov, Natalia Krizhanovsky, Paula Czarnowska, Irene Nikkarinen, Andrej Krizhanovsky, Tiago Pimentel, Lucas Torroba Hennigen, Christo Kirov, Garrett Nicolai, Ad- ina Williams, Antonios Anastasopoulos, Hilaria Cruz, Eleanor Chodroff, Ryan Cotterell, Miikka Silfverberg, and Mans Hulden. 2020. The SIG- MORPHON 2020 Shared Task 0: Typologically diverse morphological inflection. In Proceedings of the 17th SIGMORPHON Workshop on Compu- tational Research in Phonetics, Phonology, and Morphology.", |
| "links": null |
| }, |
| "BIBREF8": { |
| "ref_id": "b8", |
| "title": "Exact hard monotonic attention for character-level transduction", |
| "authors": [ |
| { |
| "first": "Shijie", |
| "middle": [], |
| "last": "Wu", |
| "suffix": "" |
| }, |
| { |
| "first": "Ryan", |
| "middle": [], |
| "last": "Cotterell", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "Proceedings of the 57th Annual Meeting of the Association for Computational Linguistics", |
| "volume": "", |
| "issue": "", |
| "pages": "1530--1537", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/P19-1148" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Shijie Wu and Ryan Cotterell. 2019. Exact hard mono- tonic attention for character-level transduction. In Proceedings of the 57th Annual Meeting of the Asso- ciation for Computational Linguistics, pages 1530- 1537, Florence, Italy. Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF9": { |
| "ref_id": "b9", |
| "title": "Applying the transformer to character-level transduction", |
| "authors": [ |
| { |
| "first": "Shijie", |
| "middle": [], |
| "last": "Wu", |
| "suffix": "" |
| }, |
| { |
| "first": "Ryan", |
| "middle": [], |
| "last": "Cotterell", |
| "suffix": "" |
| }, |
| { |
| "first": "Mans", |
| "middle": [], |
| "last": "Hulden", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Shijie Wu, Ryan Cotterell, and Mans Hulden. 2020. Applying the transformer to character-level transduc- tion. CoRR, abs/2005.10213.", |
| "links": null |
| }, |
| "BIBREF10": { |
| "ref_id": "b10", |
| "title": "Breaking the beam search curse: A study of (re-)scoring methods and stopping criteria for neural machine translation", |
| "authors": [ |
| { |
| "first": "Yilin", |
| "middle": [], |
| "last": "Yang", |
| "suffix": "" |
| }, |
| { |
| "first": "Liang", |
| "middle": [], |
| "last": "Huang", |
| "suffix": "" |
| }, |
| { |
| "first": "Mingbo", |
| "middle": [], |
| "last": "Ma", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "Proceedings of the 2018 Conference on Empirical Methods in Natural Language Processing", |
| "volume": "", |
| "issue": "", |
| "pages": "3054--3059", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/D18-1342" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Yilin Yang, Liang Huang, and Mingbo Ma. 2018. Breaking the beam search curse: A study of (re- )scoring methods and stopping criteria for neural machine translation. In Proceedings of the 2018 Conference on Empirical Methods in Natural Lan- guage Processing, pages 3054-3059, Brussels, Bel- gium. Association for Computational Linguistics.", |
| "links": null |
| } |
| }, |
| "ref_entries": { |
| "TABREF1": { |
| "html": null, |
| "content": "<table><tr><td/><td>Greedy</td><td/><td>Beam5</td><td/></tr><tr><td/><td>acc</td><td>dist</td><td>acc</td><td>dist</td></tr><tr><td>ang</td><td>0.574</td><td>0.76</td><td>0.578</td><td>0.75</td></tr><tr><td>azg</td><td>0.808</td><td>0.63</td><td>0.813</td><td>0.62</td></tr><tr><td>ceb</td><td>0.874</td><td>0.27</td><td>0.874</td><td>0.27</td></tr><tr><td>cly</td><td>0.653</td><td>0.72</td><td>0.657</td><td>0.71</td></tr><tr><td>cpa</td><td>0.651</td><td>0.52</td><td>0.653</td><td>0.52</td></tr><tr><td>czn</td><td>0.695</td><td>0.62</td><td>0.702</td><td>0.59</td></tr><tr><td>deu</td><td>0.883</td><td>0.19</td><td>0.882</td><td>0.19</td></tr><tr><td>dje</td><td>0.938</td><td>0.12</td><td>0.938</td><td>0.12</td></tr></table>", |
| "num": null, |
| "text": "Accuracy and Levenshtein distance on the test set for greedy and beam search with beam size 5 for HMMs.", |
| "type_str": "table" |
| }, |
| "TABREF2": { |
| "html": null, |
| "content": "<table/>", |
| "num": null, |
| "text": "Accuracy and Levenshtein distance on the test set for greedy and beam search with beam size 5 for Transformers.", |
| "type_str": "table" |
| } |
| } |
| } |
| } |