| { |
| "paper_id": "2020", |
| "header": { |
| "generated_with": "S2ORC 1.0.0", |
| "date_generated": "2023-01-19T15:31:48.142867Z" |
| }, |
| "title": "Getting the ##life out of living: How Adequate Are Word-Pieces for Modelling Complex Morphology?", |
| "authors": [ |
| { |
| "first": "Stav", |
| "middle": [], |
| "last": "Klein", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "Bar-Ilan University", |
| "location": {} |
| }, |
| "email": "klein.stav@gmail.com" |
| }, |
| { |
| "first": "Reut", |
| "middle": [], |
| "last": "Tsarfaty", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "Bar-Ilan University", |
| "location": {} |
| }, |
| "email": "reut.tsarfaty@gmail.com" |
| } |
| ], |
| "year": "", |
| "venue": null, |
| "identifiers": {}, |
| "abstract": "This work investigates the most basic units that underlie contextualized word embeddings, such as BERT-the so-called word pieces. In Morphologically-Rich Languages (MRLs) which exhibit morphological fusion and nonconcatenative morphology, the different units of meaning within a word may be fused, intertwined, and cannot be separated linearly. Therefore, when using word-pieces in MRLs, we must consider that: (1) a linear segmentation into sub-word units might not capture the full morphological complexity of words; and (2) representations that leave morphological knowledge on sub-word units inaccessible might negatively affect performance. Here we empirically examine the capacity of wordpieces to capture morphology by investigating the task of multi-tagging in Hebrew, as a proxy to evaluating the underlying segmentation. Our results show that, while models trained to predict multi-tags for complete words outperform models tuned to predict the distinct tags of WPs, we can improve the WPs tag prediction by purposefully constraining the wordpieces to reflect their internal functions. We conjecture that this is due to the na\u00efve linear tokenization of words into word-pieces, and suggest that linguistically-informed word-pieces schemes, that make morphological knowledge explicit, might boost performance for MRLs.", |
| "pdf_parse": { |
| "paper_id": "2020", |
| "_pdf_hash": "", |
| "abstract": [ |
| { |
| "text": "This work investigates the most basic units that underlie contextualized word embeddings, such as BERT-the so-called word pieces. In Morphologically-Rich Languages (MRLs) which exhibit morphological fusion and nonconcatenative morphology, the different units of meaning within a word may be fused, intertwined, and cannot be separated linearly. Therefore, when using word-pieces in MRLs, we must consider that: (1) a linear segmentation into sub-word units might not capture the full morphological complexity of words; and (2) representations that leave morphological knowledge on sub-word units inaccessible might negatively affect performance. Here we empirically examine the capacity of wordpieces to capture morphology by investigating the task of multi-tagging in Hebrew, as a proxy to evaluating the underlying segmentation. Our results show that, while models trained to predict multi-tags for complete words outperform models tuned to predict the distinct tags of WPs, we can improve the WPs tag prediction by purposefully constraining the wordpieces to reflect their internal functions. We conjecture that this is due to the na\u00efve linear tokenization of words into word-pieces, and suggest that linguistically-informed word-pieces schemes, that make morphological knowledge explicit, might boost performance for MRLs.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Abstract", |
| "sec_num": null |
| } |
| ], |
| "body_text": [ |
| { |
| "text": "Contextualized word-embedding models, such as BERT (Devlin et al., 2019) and XLNet (Yang et al., 2019) , rely on sub-word units called wordpieces (Johnson et al., 2017) , that enable these models to generalize over frequent charactersequences and elegantly handle out-of-vocabulary items (with minimal resort to character-based models). This word-pieces architecture helps the models make better predictions for complete words without the need to keep a large dictionary for all the possible word-forms in a language.", |
| "cite_spans": [ |
| { |
| "start": 51, |
| "end": 72, |
| "text": "(Devlin et al., 2019)", |
| "ref_id": "BIBREF4" |
| }, |
| { |
| "start": 83, |
| "end": 102, |
| "text": "(Yang et al., 2019)", |
| "ref_id": "BIBREF17" |
| }, |
| { |
| "start": 146, |
| "end": 168, |
| "text": "(Johnson et al., 2017)", |
| "ref_id": "BIBREF5" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "Effectively analyzing the internal content of words is important for Morphologically-Rich Languages (MRLs) (Tsarfaty et al., 2010) , that express multiple units of meaning at word level. Due to morphological ambiguity, the interpretation of the many functions of a complete word has to be determined in the context of the utterance, making explicit the contribution of each linguistic sub-word unit (a.k.a., morpheme) to the global meaning.", |
| "cite_spans": [ |
| { |
| "start": 107, |
| "end": 130, |
| "text": "(Tsarfaty et al., 2010)", |
| "ref_id": "BIBREF14" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "In this study we aim to investigate how well morphological information is captured by contextualized embedding models, or, more specifically, by their underlying word-pieces. We hypothesize that the word-pieces tokenization scheme in these models, which is not reflective of the actual morphology, will decrease the model's ability to predict morphological functions on sub-word units.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "In order to test this hypothesis we use Multilingual BERT (Devlin et al., 2019) on the task of multi-tagging raw words in a morphologically rich and ambiguous language, Modern Hebrew. Pre-neural studies on Hebrew found that explicitly modeling sub-word morphological information, substantially improves results on tagging and parsing down the NLP pipeline (More and Tsarfaty, 2016; More et al., 2019) . Here our results show a significant drop in multi-tagging accuracy in word-level settings compared to settings where we aim to tag the distinct WPs. Nevertheless, when we purposefully incorporate morphological knowledge that reflects the internal functions of WPs, the tagging of WPs substantially improves.", |
| "cite_spans": [ |
| { |
| "start": 58, |
| "end": 79, |
| "text": "(Devlin et al., 2019)", |
| "ref_id": "BIBREF4" |
| }, |
| { |
| "start": 356, |
| "end": 381, |
| "text": "(More and Tsarfaty, 2016;", |
| "ref_id": "BIBREF9" |
| }, |
| { |
| "start": 382, |
| "end": 400, |
| "text": "More et al., 2019)", |
| "ref_id": "BIBREF8" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "We conjecture that current word-pieces architectures might be sub-optimal for capturing complex (e.g., fusional) morphology, and that more morphologically-informed schemes may yield better models, at least for MRLs. Morphologically-Rich languages (MRLs) (Tsarfaty et al., 2010) are languages that express syntactic relations by inflection or agglutination at word level. In NLP, MRLs often require segmentation into sub-word units called morphemes as part of the pre-processing in the NLP pipelines. The term morphological fusion, or simply fusion, refers to the degree to which morphemes are connected to a word host or stem (Bickel and Nichols, 2013) . There are three values for the degree of fusion: isolating (low), concatenative (mild) and non-concatenative (high). MRLs thus belong to the mild-and high-fusion language groups. In concatenative MRLs like Turkish (Swift, 1963) and Russian (Wade, 1992; Shevelov, 1957) morphemes are linearly connected to the stem, and so a concatenated word-form can easily be segmented back into its composing morphemes. Segmenting highly fusional MRLs (henceforth fMRL), like Hebrew (Berman and Bolozky, 1978) , is not as simple, since words can be affixed in such a way that makes the stem and/or affix undergo morpho-phonological changes resulting in ambiguous, syncretic word-forms. These changes cannot be restored without morphological disambiguation of the word in context of the whole sentence. Furthermore, word-forms may involve a combination of a root and a template which are intertwined via a non-concatenative process, and both contribute meaning to the word-form.", |
| "cite_spans": [ |
| { |
| "start": 254, |
| "end": 277, |
| "text": "(Tsarfaty et al., 2010)", |
| "ref_id": "BIBREF14" |
| }, |
| { |
| "start": 626, |
| "end": 652, |
| "text": "(Bickel and Nichols, 2013)", |
| "ref_id": "BIBREF2" |
| }, |
| { |
| "start": 869, |
| "end": 882, |
| "text": "(Swift, 1963)", |
| "ref_id": "BIBREF13" |
| }, |
| { |
| "start": 887, |
| "end": 907, |
| "text": "Russian (Wade, 1992;", |
| "ref_id": null |
| }, |
| { |
| "start": 908, |
| "end": 923, |
| "text": "Shevelov, 1957)", |
| "ref_id": "BIBREF10" |
| }, |
| { |
| "start": 1124, |
| "end": 1150, |
| "text": "(Berman and Bolozky, 1978)", |
| "ref_id": "BIBREF1" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "Let us consider two examples for high fusion morphological phenomena in Modern Hebrew. First, consider the word-form \u202b.\u05d1\u05e6\u05dc\u202c It can either mean 'in their shadow' ( \u202b-\u05d1\u202cPreposition \u2227 \u202b-\u05e6\u05dc\u202c Noun \u2227 \u202b-\u05e9\u05dc\u05d4\u202cPossessive), 'their onion' ( \u202b-\u05d1\u05e6\u05dc\u202c Noun \u2227 \u202b-\u05e9\u05dc\u05d4\u202cPossessive)), 'in the photographer' ( \u202b-\u05d1\u202cPreposition \u2227 \u202b-\u05d4\u202cDefinite \u2227 \u202b-\u05e6\u05dc\u202cNoun) or 'Betselem' ( \u202b-\u05d1\u05e6\u05dc\u202cProper Noun, a known organization). The differences between the actual word-form \u202b\u05d1\u05e6\u05dc\u202c and the segments representing the composing morphemes in the different analyses, illustrate how complex morphological processes in Hebrew dictate the final word form -that is, the final form is no longer re-constructable by (simply concatenating) the morphological segments. Among the different analyses, no interpretation is a-priori more likely than others. Only in context the correct analysis can be determined.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "Next, let us consider the following two verbs: \u202b\u05e9\u05d5\u05de\u05e8\u202c ('/somer', keep.PRES.MASC.SG, 'keeps') and \u202b\u05e9\u05de\u05d5\u05e8\u202c \u202b\u05e0\u202c ('ni-/smor', 1st.PL.FUT-keep.FUT, 'we will keep'). Here, although the affixes \u202b,\u05d5\u202c \u202b\u05e0\u202c can be separated from the root letters \u202b,\u05e9\u05de\u05e8\u202c the analysis of the verb cannot be constructed by analyzing the mere character sequences, it must be understood from the unified form of the morphemes.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "So, from the first example, we observe that morphological disambiguation is crucial, and that contextualized models may actually be good candidates for morphological disambiguation where the external context is crucial. But from the second example, we learn that the linear order and strict separation of words into word-pieces, as is done in current contextualized embeddings, may be too arbitrary and too strict, which may in turn undermine the performance of tasks down the NLP pipeline, particularly for fMRLs.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "The Goal This paper aims to investigate whether word pieces capture sufficient morphological information about whole words. That is, we ask whether the information contained in such representations would allow predicting the multiple functions of an input, i.e. a space-delimited word-form. In particular, we empirically examine this capacity via the task of multi-tag assignment in Hebrew -where each multi-tag reflects the analyses of a single word-form bearing multiple POS tags -as illustrated in our Hebrew example in section 2. We conduct a series of experiments on multi POS-tag assignment to raw word forms in Hebrew texts, changing the granularity of the input and the output to reflect word-internal functions that are potentially captured by individual word-pieces.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "The Question: How Adequate are Word Pieces for Modeling Morphology", |
| "sec_num": "3" |
| }, |
| { |
| "text": "The Task We define a multitag as a single label that consists of the multiple POS tags reflecting the categories of the (morphological) segments of a word-form. For example, we assign the wordform \u202b,\u05d1\u05d1\u05d9\u05ea\u202c which means 'in the house', the multitag IN \u2227 DEF \u2227 NN. In all of our experiments, the model receives as input a sentence that underwent a tokenization into word pieces by the built-in tokenizer of mBERT (Wolf et al., 2019) . We then output a multitag for each word as a whole. Our models vary in how much (and what kind of) information is predicted for each of the word-pieces.", |
| "cite_spans": [ |
| { |
| "start": 409, |
| "end": 428, |
| "text": "(Wolf et al., 2019)", |
| "ref_id": "BIBREF16" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "The Question: How Adequate are Word Pieces for Modeling Morphology", |
| "sec_num": "3" |
| }, |
| { |
| "text": "Experimental Setup We use the Hebrew section of the SPMRLs treebank, which consists of 6500 sentences from the daily newspaper Ha'aretz (Sima'an et al., 2001) . This corpus was manually annotated for POS tags at morpheme-level by trained experts, and it is the accepted benchmark for all morphological processing tasks in Hebrew. We fine-tune the models using the PyTorch implementation of transformers by Wolf et al. (2019) . We use its standard BertTokenizer and BertForTokenClassification, with multilingual BERT (cased) as our model for fine-tuning. We use the standard train set as input for finetuning, and evaluate and report results on the dev set. We report on two measures. The first is Exact Match (EM), that is, the percentage of correct multitag assignments from all multitag assignments to word-forms in the evaluation set.", |
| "cite_spans": [ |
| { |
| "start": 136, |
| "end": 158, |
| "text": "(Sima'an et al., 2001)", |
| "ref_id": "BIBREF11" |
| }, |
| { |
| "start": 406, |
| "end": 424, |
| "text": "Wolf et al. (2019)", |
| "ref_id": "BIBREF16" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "The Question: How Adequate are Word Pieces for Modeling Morphology", |
| "sec_num": "3" |
| }, |
| { |
| "text": "EM = # correct multitags # words (1)", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "The Question: How Adequate are Word Pieces for Modeling Morphology", |
| "sec_num": "3" |
| }, |
| { |
| "text": "The second is Existence F1: precision and recall on the existence of correct POS tags in a (possibly incorrect) multitag assignment. We compute Existence F1 based on the precision and recall that follow. For calculating the precision and recall the predicted multitag is split into its composing simple POS tags. Note that F1 gives partial credit on correctly identified POS in the case of partial identification or wrong order, while EM doesn't. 3.1 Models", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "The Question: How Adequate are Word Pieces for Modeling Morphology", |
| "sec_num": "3" |
| }, |
| { |
| "text": "We begin with an Oracle scenario that emulates an English-like POS tagging scenario, where the input is a sequence of strings, in our case gold presegmented morphemes, and the output is a single POS tag per segment. For fine-tuning, we use presegmented words along with their corresponding POS tags, as it is gold-annotated in our training data. It should be noted that these segments undergo additional tokenization into word pieces by mBERT's tokenizer, based on its internal wordpieces lexicon, prior to fine-tuning.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Oracle", |
| "sec_num": "3.1.1" |
| }, |
| { |
| "text": "For comparability with the other models, the evaluation is done on raw words i.e., we combine the predicted simple tags into a multitag and compare it to the original multitag per word. This scenario is of course not realistic, in the sense that gold segmented data at morpheme level are slow and costly to deliver. However, this setting provides an empirical upper-bound for the performance of BERT on a simple POS tagging in Hebrew. We hypothesize that, had BERT's tokenization into word pieces been morphologically informed, the model's accuracy in word-level settings could rise up to the level of performance on this pre-segmented Oracle scenario.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Oracle", |
| "sec_num": "3.1.1" |
| }, |
| { |
| "text": "Moving on to a realistic scenario, in our next task the input to the model is a sequence of raw word forms, and the output is a sequence of multi-tags, one multi-tag (i.e., multiple POS) per word. During fine-tuning, each word piece (WP) is assigned the multitag of the complete original word. Unlike the Oracle setting, where the input for fine-tuning reflected morphological phenomena, here no morphological knowledge is incorporated at all. During inference, the input is composed of raw words which undergo BERT's tokenization into wordpieces (WP), and each WP gets assigned one of the multi-tags encountered during fine-tuning. The goal here is to examine the ability of the BERT-based representations to cope with a large space of complex labels (multi-tags) that re- sult from different morphological (and morphophonological) processes that construct words in an MRL. This setting has several drawbacks; first, it is unable to generalize to an unseen composition of tagged-pieces into a new multitag, and second, throughout the process, the internal morphological segmentation of the tokens remains inaccessible.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Word-Level Multi-tagging", |
| "sec_num": "3.1.2" |
| }, |
| { |
| "text": "Retaining our realistic settings, where the input is composed of raw words, we split multi-tagging into two independent tasks. One predicts the multi-tag reflecting the prefix of that word, and the other predicts the multi-tag of its host (plus pronominal clitics). 1 The input for fine-tuning, in both cases, presents raw words having undergone BERT's tokenization, and each WP is assigned the multi-tag of the Prefix (/Host) of that word.", |
| "cite_spans": [ |
| { |
| "start": 266, |
| "end": 267, |
| "text": "1", |
| "ref_id": null |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Prefix/Host Multi-tagging", |
| "sec_num": "3.1.3" |
| }, |
| { |
| "text": "For the prefix task, we implemented a function that looks for all known tags that represent prefixes in Hebrew, and truncated the complete multitag of the word to include only them. For instance, a word that is assigned the multi-tag IN \u2227 DEF \u2227 NN will now get assigned the multi-tag IN \u2227 DEF. Words that don't contain a prefix get assigned the label '-'. Likewise for the host, words are assigned only the part of the multi-tag that doesn't contain prefix tags. For the above example, this would simply be NN. Fine-tuning is performed independently for each of the tasks. At inference time, the predictions for the prefix and host are combined into a single multi-tag, compared against the gold multi-tag for evaluation.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Prefix/Host Multi-tagging", |
| "sec_num": "3.1.3" |
| }, |
| { |
| "text": "One technical advantage of this setting is that it substantially limits the label-space that needs to be learned per word. Also, unlike the previous scenario, the model is able to generate unseen multitags (to some extent) by creating previously unseen Prefix-Host compositions.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Prefix/Host Multi-tagging", |
| "sec_num": "3.1.3" |
| }, |
| { |
| "text": "In this scenario we aim to assign to each WP a single tag that corresponds to the actual function of that WP.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Decomposed Multi-tagging", |
| "sec_num": "3.1.4" |
| }, |
| { |
| "text": "For fine-tuning, we use the same data as in the Oracle scenario. That is, we use pre-segmented morphemes that undergo BERT's tokenization, paired with their corresponding tags, a single tag per WP. Now, at inference time, whole words undergo BERT's tokenization into word-pieces. Since the model was trained (fine-tuned) to predict a single tag per word-piece, the hope is that we could predict the single tag that reflects the function of this specific WP. We then combine all the (unique) predictions for all the WPs in the word to concatenate them to a single multi-tag.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Decomposed Multi-tagging", |
| "sec_num": "3.1.4" |
| }, |
| { |
| "text": "This setting tests whether the tokenization algorithm outputs WPs that are reflective of the actual morphemes the model was fine-tuned on. If this is the case, predicting a single POS tag per WP would perform similarly to the Oracle setting. However, since the internal decomposition of the words at inference time is determined solely by BERT's WPs, any divergence between the WP tokenization and the gold morphological decomposition is expected to negatively affect performance.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Decomposed Multi-tagging", |
| "sec_num": "3.1.4" |
| }, |
| { |
| "text": "Decomposed Multi-tagging", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Morphologically-Informed", |
| "sec_num": "3.1.5" |
| }, |
| { |
| "text": "Here again the input for the task consists of raw words, tokenized by BERT into word-pieces. As output we now aim to assign each word-piece a multi-tag that reflects exactly its own content. The input to fine-tuning thus has to be modified. We use raw words having undergone BERT's tokenization into WPs, and each WP is assigned a multitag label that reflects the actual POS tag(s) that this part of the word contains (an informed multi-tag). We obtain these informed multi-tags using a deterministic procedure that compares the WPs proposed by BERT to the gold morphological segmentation we have for the training data. During training, we can unambiguously detect which morphemes are relevant for the WP only, and the WP gets assigned the multi-tag of the actual morphemes it contains. At inference time we provide BERT-tokenized words as input, and each WP gets assigned an informed multi-tag as observed during fine-tuning. For evaluation, we combine the prediction made on all WPs of a word to a single ordered multi-tag, and compare it to the gold multi-tag of that word. Interestingly, this setting can potentially generate previously unseen multitags, and it maximizes the extent to which we can access word-internal structure during fine-tuning.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Morphologically-Informed", |
| "sec_num": "3.1.5" |
| }, |
| { |
| "text": "The input, output and training regimes for our models are illustrated in Table 1 . Table 2 presents the results on multi-tagging for all of our models.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 73, |
| "end": 80, |
| "text": "Table 1", |
| "ref_id": "TABREF2" |
| }, |
| { |
| "start": 83, |
| "end": 90, |
| "text": "Table 2", |
| "ref_id": "TABREF4" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Results", |
| "sec_num": "4" |
| }, |
| { |
| "text": "As expected, the Oracle scenario assigning single tags to gold segments outperformed all other models that aim to multi-tag complete words. For word-level multi-tagging, the word-level model performed at the same level as the Prefix/Host model -narrowing down the labels' space in this fashion does not seem to improve results or provide any further generalization capacity.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Results", |
| "sec_num": "4" |
| }, |
| { |
| "text": "Purposefully fine-tuning our model to assign a single POS tag per WP (trained on our gold morphological data) did not help, in fact it dramatically hurts performance. This indicates that WPs in and of themselves do not coincide with the notion of morphemes. Curiously though, informing BERT's WPs as to their own internal function prior to fine-tuning significantly improves the results compared to the model trained to assign a POS-per-WP based on gold morphology.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Results", |
| "sec_num": "4" |
| }, |
| { |
| "text": "This last result suggests that, while current WPs do not reflect morphological structure and lose morphological distinctions in their sub-word units, informing these word-units as to their own internal functions can provide a major performance boost. So far, we only incorporated such morphological information during fine-tuning. We conjecture that informing the WP algorithm earlier on, prior to pre-training, with a linguistically-informed decomposition into WPs, may greatly advance the performance of contextualized models for fMRLs.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Results", |
| "sec_num": "4" |
| }, |
| { |
| "text": "Although the term 'word pieces' was only coined in 2017, by Johnson et al. (2017) , the idea that sub-word segmentation might be useful for downstream tasks was already well-known and studied, especially in the field of Neural Machine Translation. In 2010 Luong et al. (2010) explicitly showed that incorporating morphological knowledge in the translation process significantly improves translation. In 2017 Belinkov et al. (2017) found that for learning morphology it is better to use character based representation rather than word-based ones. They also found that neural networks encode morphology in the lower layers of the network, which might explain why mere finetuning is insufficient to capture morphological complexity. Later, Straka et al. (2019) achieved SoTA on POS tagging on 54 languages, including Hebrew, but was using BERT embeddings along with character level embeddings and Fasttext (Bojanowski et al., 2017) word embeddings on gold morphology, which strengthens our claim that word pieces by themselves don't capture morphology well. This was also supported by Mielke and Eisner (2019), that explicitly mentioned the non-concatenativity of Hebrew and Arabic as the major drawback of sub word tokenization systems.", |
| "cite_spans": [ |
| { |
| "start": 60, |
| "end": 81, |
| "text": "Johnson et al. (2017)", |
| "ref_id": "BIBREF5" |
| }, |
| { |
| "start": 256, |
| "end": 275, |
| "text": "Luong et al. (2010)", |
| "ref_id": "BIBREF6" |
| }, |
| { |
| "start": 408, |
| "end": 430, |
| "text": "Belinkov et al. (2017)", |
| "ref_id": "BIBREF0" |
| }, |
| { |
| "start": 737, |
| "end": 757, |
| "text": "Straka et al. (2019)", |
| "ref_id": "BIBREF12" |
| }, |
| { |
| "start": 903, |
| "end": 928, |
| "text": "(Bojanowski et al., 2017)", |
| "ref_id": "BIBREF3" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Related Work", |
| "sec_num": "5" |
| }, |
| { |
| "text": "In this work we examined the adequacy of BERT's word-pieces as sub-word units for representing complex morphology. We chose to investigate multi-tagging in a high fusional language, as a proxy for assessing the underlying segmentation into distinct morphemes. We expected that if distinct word-pieces indeed reflect units of meaning, then tagging them would be as accurate as it is for languages that assign a single tag per word. Our results show that the current word pieces do not reflect actual morphology, resulting in decreased performance for tagging complex Hebrew words. Nonetheless, we found that imposing morphological knowledge during fine-tuning (an Informed setup) is indeed helpful, albeit a bit late. We conjecture that pre-training with a morphologicallyinformed word-pieces scheme that reflects a complex morphological reality, has the potential to improve multi-tagging, as well as other tasks down the pipeline, in Hebrew and other fMRLs.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Conclusion", |
| "sec_num": "6" |
| }, |
| { |
| "text": "Since Hebrew can stack prefixes before a host, the prefixes require a multi-tag. Similarly, hosts with pronominal clitics may also be assigned a multi-tag rather than one tag.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| } |
| ], |
| "back_matter": [ |
| { |
| "text": "We thank Yoav Goldberg, Noah Smith, Omer Levy and three reviewers for interesting discussions of an earlier draft. This research is funded by an ERC Grant #677352 and an ISF grant #1739/26, for which we are grateful.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Acknowledgements", |
| "sec_num": null |
| } |
| ], |
| "bib_entries": { |
| "BIBREF0": { |
| "ref_id": "b0", |
| "title": "What do neural machine translation models learn about morphology?", |
| "authors": [ |
| { |
| "first": "Yonatan", |
| "middle": [], |
| "last": "Belinkov", |
| "suffix": "" |
| }, |
| { |
| "first": "Nadir", |
| "middle": [], |
| "last": "Durrani", |
| "suffix": "" |
| }, |
| { |
| "first": "Fahim", |
| "middle": [], |
| "last": "Dalvi", |
| "suffix": "" |
| }, |
| { |
| "first": "Hassan", |
| "middle": [], |
| "last": "Sajjad", |
| "suffix": "" |
| }, |
| { |
| "first": "James", |
| "middle": [], |
| "last": "Glass", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "Proceedings of the 55th Annual Meeting of the Association for Computational Linguistics", |
| "volume": "1", |
| "issue": "", |
| "pages": "861--872", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/P17-1080" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Yonatan Belinkov, Nadir Durrani, Fahim Dalvi, Hassan Sajjad, and James Glass. 2017. What do neural ma- chine translation models learn about morphology? In Proceedings of the 55th Annual Meeting of the As- sociation for Computational Linguistics (Volume 1: Long Papers), pages 861-872, Vancouver, Canada. Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF1": { |
| "ref_id": "b1", |
| "title": "Modern Hebrew structure", |
| "authors": [ |
| { |
| "first": "Shmuel", |
| "middle": [], |
| "last": "Ruth Aronson Berman", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Bolozky", |
| "suffix": "" |
| } |
| ], |
| "year": 1978, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Ruth Aronson Berman and Shmuel Bolozky. 1978. Modern Hebrew structure. University Pub. Projects.", |
| "links": null |
| }, |
| "BIBREF2": { |
| "ref_id": "b2", |
| "title": "The World Atlas of Language Structures Online", |
| "authors": [ |
| { |
| "first": "Balthasar", |
| "middle": [], |
| "last": "Bickel", |
| "suffix": "" |
| }, |
| { |
| "first": "Johanna", |
| "middle": [], |
| "last": "Nichols", |
| "suffix": "" |
| } |
| ], |
| "year": 2013, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Balthasar Bickel and Johanna Nichols. 2013. Fusion of selected inflectional formatives. In Matthew S. Dryer and Martin Haspelmath, editors, The World Atlas of Language Structures Online. Max Planck Institute for Evolutionary Anthropology, Leipzig.", |
| "links": null |
| }, |
| "BIBREF3": { |
| "ref_id": "b3", |
| "title": "Enriching word vectors with subword information", |
| "authors": [ |
| { |
| "first": "Piotr", |
| "middle": [], |
| "last": "Bojanowski", |
| "suffix": "" |
| }, |
| { |
| "first": "Edouard", |
| "middle": [], |
| "last": "Grave", |
| "suffix": "" |
| }, |
| { |
| "first": "Armand", |
| "middle": [], |
| "last": "Joulin", |
| "suffix": "" |
| }, |
| { |
| "first": "Tomas", |
| "middle": [], |
| "last": "Mikolov", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "Transactions of the Association for Computational Linguistics", |
| "volume": "5", |
| "issue": "", |
| "pages": "135--146", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Piotr Bojanowski, Edouard Grave, Armand Joulin, and Tomas Mikolov. 2017. Enriching word vectors with subword information. Transactions of the Associa- tion for Computational Linguistics, 5:135-146.", |
| "links": null |
| }, |
| "BIBREF4": { |
| "ref_id": "b4", |
| "title": "BERT: Pre-training of deep bidirectional transformers for language understanding", |
| "authors": [ |
| { |
| "first": "Jacob", |
| "middle": [], |
| "last": "Devlin", |
| "suffix": "" |
| }, |
| { |
| "first": "Ming-Wei", |
| "middle": [], |
| "last": "Chang", |
| "suffix": "" |
| }, |
| { |
| "first": "Kenton", |
| "middle": [], |
| "last": "Lee", |
| "suffix": "" |
| }, |
| { |
| "first": "Kristina", |
| "middle": [], |
| "last": "Toutanova", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "Proceedings of the 2019 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies", |
| "volume": "1", |
| "issue": "", |
| "pages": "4171--4186", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/N19-1423" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Jacob Devlin, Ming-Wei Chang, Kenton Lee, and Kristina Toutanova. 2019. BERT: Pre-training of deep bidirectional transformers for language under- standing. In Proceedings of the 2019 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, Volume 1 (Long and Short Papers), pages 4171-4186, Minneapolis, Minnesota. Associ- ation for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF5": { |
| "ref_id": "b5", |
| "title": "Google's multilingual neural machine translation system: Enabling zero-shot translation", |
| "authors": [ |
| { |
| "first": "Melvin", |
| "middle": [], |
| "last": "Johnson", |
| "suffix": "" |
| }, |
| { |
| "first": "Mike", |
| "middle": [], |
| "last": "Schuster", |
| "suffix": "" |
| }, |
| { |
| "first": "Quoc", |
| "middle": [ |
| "V" |
| ], |
| "last": "Le", |
| "suffix": "" |
| }, |
| { |
| "first": "Maxim", |
| "middle": [], |
| "last": "Krikun", |
| "suffix": "" |
| }, |
| { |
| "first": "Yonghui", |
| "middle": [], |
| "last": "Wu", |
| "suffix": "" |
| }, |
| { |
| "first": "Zhifeng", |
| "middle": [], |
| "last": "Chen", |
| "suffix": "" |
| }, |
| { |
| "first": "Nikhil", |
| "middle": [], |
| "last": "Thorat", |
| "suffix": "" |
| }, |
| { |
| "first": "Fernanda", |
| "middle": [], |
| "last": "Vi\u00e9gas", |
| "suffix": "" |
| }, |
| { |
| "first": "Martin", |
| "middle": [], |
| "last": "Wattenberg", |
| "suffix": "" |
| }, |
| { |
| "first": "Greg", |
| "middle": [], |
| "last": "Corrado", |
| "suffix": "" |
| }, |
| { |
| "first": "Macduff", |
| "middle": [], |
| "last": "Hughes", |
| "suffix": "" |
| }, |
| { |
| "first": "Jeffrey", |
| "middle": [], |
| "last": "Dean", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "Transactions of the Association for Computational Linguistics", |
| "volume": "5", |
| "issue": "", |
| "pages": "339--351", |
| "other_ids": { |
| "DOI": [ |
| "10.1162/tacl_a_00065" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Melvin Johnson, Mike Schuster, Quoc V. Le, Maxim Krikun, Yonghui Wu, Zhifeng Chen, Nikhil Thorat, Fernanda Vi\u00e9gas, Martin Wattenberg, Greg Corrado, Macduff Hughes, and Jeffrey Dean. 2017. Google's multilingual neural machine translation system: En- abling zero-shot translation. Transactions of the As- sociation for Computational Linguistics, 5:339-351.", |
| "links": null |
| }, |
| "BIBREF6": { |
| "ref_id": "b6", |
| "title": "A hybrid morpheme-word representation for machine translation of morphologically rich languages", |
| "authors": [ |
| { |
| "first": "Minh-Thang", |
| "middle": [], |
| "last": "Luong", |
| "suffix": "" |
| }, |
| { |
| "first": "Preslav", |
| "middle": [], |
| "last": "Nakov", |
| "suffix": "" |
| }, |
| { |
| "first": "Min-Yen", |
| "middle": [], |
| "last": "Kan", |
| "suffix": "" |
| } |
| ], |
| "year": 2010, |
| "venue": "Proceedings of the 2010 Conference on Empirical Methods in Natural Language Processing", |
| "volume": "", |
| "issue": "", |
| "pages": "148--157", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Minh-Thang Luong, Preslav Nakov, and Min-Yen Kan. 2010. A hybrid morpheme-word representation for machine translation of morphologically rich lan- guages. In Proceedings of the 2010 Conference on Empirical Methods in Natural Language Process- ing, pages 148-157. Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF7": { |
| "ref_id": "b7", |
| "title": "Spell once, summon anywhere: A two-level open-vocabulary language model", |
| "authors": [ |
| { |
| "first": "Sabrina", |
| "middle": [ |
| "J" |
| ], |
| "last": "Mielke", |
| "suffix": "" |
| }, |
| { |
| "first": "Jason", |
| "middle": [], |
| "last": "Eisner", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "AAAI", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Sabrina J. Mielke and Jason Eisner. 2019. Spell once, summon anywhere: A two-level open-vocabulary language model. In AAAI.", |
| "links": null |
| }, |
| "BIBREF8": { |
| "ref_id": "b8", |
| "title": "Joint transition-based models for morpho-syntactic parsing: Parsing strategies for MRLs and a case study from modern Hebrew", |
| "authors": [ |
| { |
| "first": "Amir", |
| "middle": [], |
| "last": "More", |
| "suffix": "" |
| }, |
| { |
| "first": "Amit", |
| "middle": [], |
| "last": "Seker", |
| "suffix": "" |
| }, |
| { |
| "first": "Victoria", |
| "middle": [], |
| "last": "Basmova", |
| "suffix": "" |
| }, |
| { |
| "first": "Reut", |
| "middle": [], |
| "last": "Tsarfaty", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "Transactions of the Association for Computational Linguistics", |
| "volume": "7", |
| "issue": "", |
| "pages": "33--48", |
| "other_ids": { |
| "DOI": [ |
| "10.1162/tacl_a_00253" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Amir More, Amit Seker, Victoria Basmova, and Reut Tsarfaty. 2019. Joint transition-based models for morpho-syntactic parsing: Parsing strategies for MRLs and a case study from modern Hebrew. Transactions of the Association for Computational Linguistics, 7:33-48.", |
| "links": null |
| }, |
| "BIBREF9": { |
| "ref_id": "b9", |
| "title": "Data-driven morphological analysis and disambiguation for morphologically rich languages and universal dependencies", |
| "authors": [ |
| { |
| "first": "Amir", |
| "middle": [], |
| "last": "More", |
| "suffix": "" |
| }, |
| { |
| "first": "Reut", |
| "middle": [], |
| "last": "Tsarfaty", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "Proceedings of COLING 2016, the 26th International Conference on Computational Linguistics: Technical Papers", |
| "volume": "", |
| "issue": "", |
| "pages": "337--348", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Amir More and Reut Tsarfaty. 2016. Data-driven mor- phological analysis and disambiguation for morpho- logically rich languages and universal dependencies. In Proceedings of COLING 2016, the 26th Inter- national Conference on Computational Linguistics: Technical Papers, pages 337-348, Osaka, Japan. The COLING 2016 Organizing Committee.", |
| "links": null |
| }, |
| "BIBREF10": { |
| "ref_id": "b10", |
| "title": "The structure of the root in modern russian", |
| "authors": [ |
| { |
| "first": "George", |
| "middle": [ |
| "Y" |
| ], |
| "last": "Shevelov", |
| "suffix": "" |
| } |
| ], |
| "year": 1957, |
| "venue": "The Slavic and East European Journal", |
| "volume": "1", |
| "issue": "2", |
| "pages": "106--124", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "George Y Shevelov. 1957. The structure of the root in modern russian. The Slavic and East European Journal, 1(2):106-124.", |
| "links": null |
| }, |
| "BIBREF11": { |
| "ref_id": "b11", |
| "title": "Building a tree-bank of modern hebrew text", |
| "authors": [ |
| { |
| "first": "Khalil", |
| "middle": [], |
| "last": "Sima'an", |
| "suffix": "" |
| }, |
| { |
| "first": "Alon", |
| "middle": [], |
| "last": "Itai", |
| "suffix": "" |
| }, |
| { |
| "first": "Yoad", |
| "middle": [], |
| "last": "Winter", |
| "suffix": "" |
| }, |
| { |
| "first": "Alon", |
| "middle": [], |
| "last": "Altman", |
| "suffix": "" |
| }, |
| { |
| "first": "Noa", |
| "middle": [], |
| "last": "Nativ", |
| "suffix": "" |
| } |
| ], |
| "year": 2001, |
| "venue": "Traitement Automatique des Langues", |
| "volume": "42", |
| "issue": "2", |
| "pages": "247--380", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Khalil Sima'an, Alon Itai, Yoad Winter, Alon Alt- man, and Noa Nativ. 2001. Building a tree-bank of modern hebrew text. Traitement Automatique des Langues, 42(2):247-380.", |
| "links": null |
| }, |
| "BIBREF12": { |
| "ref_id": "b12", |
| "title": "Evaluating contextualized embeddings on 54 languages in pos tagging, lemmatization and dependency parsing", |
| "authors": [ |
| { |
| "first": "Milan", |
| "middle": [], |
| "last": "Straka", |
| "suffix": "" |
| }, |
| { |
| "first": "Jana", |
| "middle": [], |
| "last": "Strakov\u00e1", |
| "suffix": "" |
| }, |
| { |
| "first": "Jan", |
| "middle": [], |
| "last": "Haji\u010d", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXiv:1908.07448" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Milan Straka, Jana Strakov\u00e1, and Jan Haji\u010d. 2019. Evaluating contextualized embeddings on 54 lan- guages in pos tagging, lemmatization and depen- dency parsing. arXiv preprint arXiv:1908.07448.", |
| "links": null |
| }, |
| "BIBREF13": { |
| "ref_id": "b13", |
| "title": "A Reference Grammar of Modern Turkish", |
| "authors": [ |
| { |
| "first": "Lloyd", |
| "middle": [ |
| "B" |
| ], |
| "last": "Swift", |
| "suffix": "" |
| } |
| ], |
| "year": 1963, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Lloyd B. Swift. 1963. A Reference Grammar of Mod- ern Turkish. Indiana University Press, Bloomington.", |
| "links": null |
| }, |
| "BIBREF14": { |
| "ref_id": "b14", |
| "title": "Statistical parsing of morphologically rich languages (spmrl): what, how and whither", |
| "authors": [ |
| { |
| "first": "Reut", |
| "middle": [], |
| "last": "Tsarfaty", |
| "suffix": "" |
| }, |
| { |
| "first": "Djam\u00e9", |
| "middle": [], |
| "last": "Seddah", |
| "suffix": "" |
| }, |
| { |
| "first": "Yoav", |
| "middle": [], |
| "last": "Goldberg", |
| "suffix": "" |
| }, |
| { |
| "first": "Sandra", |
| "middle": [], |
| "last": "K\u00fcbler", |
| "suffix": "" |
| }, |
| { |
| "first": "Marie", |
| "middle": [], |
| "last": "Candito", |
| "suffix": "" |
| }, |
| { |
| "first": "Jennifer", |
| "middle": [], |
| "last": "Foster", |
| "suffix": "" |
| }, |
| { |
| "first": "Yannick", |
| "middle": [], |
| "last": "Versley", |
| "suffix": "" |
| }, |
| { |
| "first": "Ines", |
| "middle": [], |
| "last": "Rehbein", |
| "suffix": "" |
| }, |
| { |
| "first": "Lamia", |
| "middle": [], |
| "last": "Tounsi", |
| "suffix": "" |
| } |
| ], |
| "year": 2010, |
| "venue": "Proceedings of the NAACL HLT 2010 First Workshop on Statistical Parsing of Morphologically-Rich Languages", |
| "volume": "", |
| "issue": "", |
| "pages": "1--12", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Reut Tsarfaty, Djam\u00e9 Seddah, Yoav Goldberg, San- dra K\u00fcbler, Marie Candito, Jennifer Foster, Yannick Versley, Ines Rehbein, and Lamia Tounsi. 2010. Sta- tistical parsing of morphologically rich languages (spmrl): what, how and whither. In Proceedings of the NAACL HLT 2010 First Workshop on Statistical Parsing of Morphologically-Rich Languages, pages 1-12. Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF15": { |
| "ref_id": "b15", |
| "title": "A Comprehensive Russian Grammar", |
| "authors": [ |
| { |
| "first": "Terence", |
| "middle": [ |
| "L", |
| "B" |
| ], |
| "last": "Wade", |
| "suffix": "" |
| } |
| ], |
| "year": 1992, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Terence L. B. Wade. 1992. A Comprehensive Russian Grammar. Blackwell, Oxford. Reprinted in 1995.", |
| "links": null |
| }, |
| "BIBREF16": { |
| "ref_id": "b16", |
| "title": "Huggingface's transformers: State-of-the-art natural language processing", |
| "authors": [ |
| { |
| "first": "Thomas", |
| "middle": [], |
| "last": "Wolf", |
| "suffix": "" |
| }, |
| { |
| "first": "Lysandre", |
| "middle": [], |
| "last": "Debut", |
| "suffix": "" |
| }, |
| { |
| "first": "Victor", |
| "middle": [], |
| "last": "Sanh", |
| "suffix": "" |
| }, |
| { |
| "first": "Julien", |
| "middle": [], |
| "last": "Chaumond", |
| "suffix": "" |
| }, |
| { |
| "first": "Clement", |
| "middle": [], |
| "last": "Delangue", |
| "suffix": "" |
| }, |
| { |
| "first": "Anthony", |
| "middle": [], |
| "last": "Moi", |
| "suffix": "" |
| }, |
| { |
| "first": "Pierric", |
| "middle": [], |
| "last": "Cistac", |
| "suffix": "" |
| }, |
| { |
| "first": "Tim", |
| "middle": [], |
| "last": "Rault", |
| "suffix": "" |
| }, |
| { |
| "first": "R\u00e9mi", |
| "middle": [], |
| "last": "Louf", |
| "suffix": "" |
| }, |
| { |
| "first": "Morgan", |
| "middle": [], |
| "last": "Funtowicz", |
| "suffix": "" |
| }, |
| { |
| "first": "Jamie", |
| "middle": [], |
| "last": "Brew", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "ArXiv", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Thomas Wolf, Lysandre Debut, Victor Sanh, Julien Chaumond, Clement Delangue, Anthony Moi, Pier- ric Cistac, Tim Rault, R\u00e9mi Louf, Morgan Funtow- icz, and Jamie Brew. 2019. Huggingface's trans- formers: State-of-the-art natural language process- ing. ArXiv, abs/1910.03771.", |
| "links": null |
| }, |
| "BIBREF17": { |
| "ref_id": "b17", |
| "title": "Xlnet: Generalized autoregressive pretraining for language understanding", |
| "authors": [ |
| { |
| "first": "Zhilin", |
| "middle": [], |
| "last": "Yang", |
| "suffix": "" |
| }, |
| { |
| "first": "Zihang", |
| "middle": [], |
| "last": "Dai", |
| "suffix": "" |
| }, |
| { |
| "first": "Yiming", |
| "middle": [], |
| "last": "Yang", |
| "suffix": "" |
| }, |
| { |
| "first": "Jaime", |
| "middle": [], |
| "last": "Carbonell", |
| "suffix": "" |
| }, |
| { |
| "first": "Russ", |
| "middle": [ |
| "R" |
| ], |
| "last": "Salakhutdinov", |
| "suffix": "" |
| }, |
| { |
| "first": "Quoc", |
| "middle": [ |
| "V" |
| ], |
| "last": "Le", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "Advances in neural information processing systems", |
| "volume": "", |
| "issue": "", |
| "pages": "5754--5764", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Zhilin Yang, Zihang Dai, Yiming Yang, Jaime Car- bonell, Russ R Salakhutdinov, and Quoc V Le. 2019. Xlnet: Generalized autoregressive pretraining for language understanding. In Advances in neural in- formation processing systems, pages 5754-5764.", |
| "links": null |
| } |
| }, |
| "ref_entries": { |
| "FIGREF0": { |
| "num": null, |
| "text": "P recision = # correctly predicted individual POS tags # individual POS tags in all multitag assignments (2) Recall = # correctly predicted individual POS tags # individual POS tags in all multitags in the evaluation set (3)", |
| "type_str": "figure", |
| "uris": null |
| }, |
| "TABREF0": { |
| "html": null, |
| "num": null, |
| "text": "The Data: All Analytic Languages are Alike, Each MRL Is Rich In Its Own Way", |
| "type_str": "table", |
| "content": "<table/>" |
| }, |
| "TABREF2": { |
| "html": null, |
| "num": null, |
| "text": "", |
| "type_str": "table", |
| "content": "<table/>" |
| }, |
| "TABREF4": { |
| "html": null, |
| "num": null, |
| "text": "Empirical Results. We report EM and F1 on raw-words' multi-tags, for all models and training regimes.", |
| "type_str": "table", |
| "content": "<table/>" |
| } |
| } |
| } |
| } |