| { |
| "paper_id": "2020", |
| "header": { |
| "generated_with": "S2ORC 1.0.0", |
| "date_generated": "2023-01-19T03:14:59.194910Z" |
| }, |
| "title": "MTLB-STRUCT @PARSEME 2020: Capturing Unseen Multiword Expressions Using Multi-task Learning and Pre-trained Masked Language Models", |
| "authors": [ |
| { |
| "first": "Shiva", |
| "middle": [], |
| "last": "Taslimipoor", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "ALTA Institute University of Cambridge", |
| "location": { |
| "country": "UK" |
| } |
| }, |
| "email": "" |
| }, |
| { |
| "first": "Sara", |
| "middle": [], |
| "last": "Bahaadini", |
| "suffix": "", |
| "affiliation": {}, |
| "email": "" |
| }, |
| { |
| "first": "Ekaterina", |
| "middle": [], |
| "last": "Kochmar", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "ALTA Institute University of Cambridge", |
| "location": { |
| "country": "UK" |
| } |
| }, |
| "email": "" |
| } |
| ], |
| "year": "", |
| "venue": null, |
| "identifiers": {}, |
| "abstract": "This paper describes a semi-supervised system that jointly learns verbal multiword expressions (VMWEs) and dependency parse trees as an auxiliary task. The model benefits from pre-trained multilingual BERT. BERT hidden layers are shared among the two tasks and we introduce an additional linear layer to retrieve VMWE tags. The dependency parse tree prediction is modelled by a linear layer and a bilinear one plus a tree CRF on top of BERT. The system has participated in the open track of the PARSEME shared task 2020 and ranked first in terms of F1-score in identifying unseen VMWEs as well as VMWEs in general, averaged across all 14 languages.", |
| "pdf_parse": { |
| "paper_id": "2020", |
| "_pdf_hash": "", |
| "abstract": [ |
| { |
| "text": "This paper describes a semi-supervised system that jointly learns verbal multiword expressions (VMWEs) and dependency parse trees as an auxiliary task. The model benefits from pre-trained multilingual BERT. BERT hidden layers are shared among the two tasks and we introduce an additional linear layer to retrieve VMWE tags. The dependency parse tree prediction is modelled by a linear layer and a bilinear one plus a tree CRF on top of BERT. The system has participated in the open track of the PARSEME shared task 2020 and ranked first in terms of F1-score in identifying unseen VMWEs as well as VMWEs in general, averaged across all 14 languages.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Abstract", |
| "sec_num": null |
| } |
| ], |
| "body_text": [ |
| { |
| "text": "In addition to other challenges in multiword expression (MWE) processing that were addressed in previous work, such as non-compositionality (Salehi et al., 2014) , discontinuity Waszczuk, 2018) , and syntactic variability (Pasquer et al., 2018 ), The PARSEME shared task edition 1.2 1 has focused on another prominent challenge in detecting MWEs, namely detection of unseen MWEs. The problem with unseen data is common for many NLP tasks. While rule-based and unsupervised ML approaches are less affected by unseen data, supervised ML techniques are often found to be prone to overfitting. In this respect, the introduction of language modelling objectives to be added to different NLP tasks and their effect on generalisation have shown promising results (Rei, 2017) . Further improvements brought by pre-trained language models made them a popular approach to a multitude of NLP tasks (Devlin et al., 2019) . One particular advantage of such models is that they facilitate generalisation beyond task-specific annotations (Pires et al., 2019) .", |
| "cite_spans": [ |
| { |
| "start": 140, |
| "end": 161, |
| "text": "(Salehi et al., 2014)", |
| "ref_id": "BIBREF16" |
| }, |
| { |
| "start": 178, |
| "end": 193, |
| "text": "Waszczuk, 2018)", |
| "ref_id": "BIBREF22" |
| }, |
| { |
| "start": 222, |
| "end": 243, |
| "text": "(Pasquer et al., 2018", |
| "ref_id": "BIBREF8" |
| }, |
| { |
| "start": 756, |
| "end": 767, |
| "text": "(Rei, 2017)", |
| "ref_id": "BIBREF13" |
| }, |
| { |
| "start": 887, |
| "end": 908, |
| "text": "(Devlin et al., 2019)", |
| "ref_id": "BIBREF4" |
| }, |
| { |
| "start": 1023, |
| "end": 1043, |
| "text": "(Pires et al., 2019)", |
| "ref_id": "BIBREF10" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "MWEs are inherent in all natural languages and distinguishable for their syntactic and semantic idiosyncracies (Baldwin and Kim, 2010; Fazly et al., 2009) . Since language models are good at capturing syntactic and semantic features, we believe they are a suitable approach for modelling MWEs. In particular, our system relies on BERT pre-trained language models (Devlin et al., 2019) . Additionally, we render the system semi-supervised by means of multi-task learning. The most promising feature to be jointly learned with MWEs is dependency parse information (Constant and Nivre, 2016) . Accordingly, we fine-tune BERT for two different objectives: MWE detection and dependency parsing. MWE learning is done via token classification using a linear layer on top of BERT, and dependency parse trees are learned using dependency tree CRF network (Rush, 2020) . Our experiments confirm that this joint learning architecture is effective for capturing MWEs in most languages represented in the shared task. 2", |
| "cite_spans": [ |
| { |
| "start": 111, |
| "end": 134, |
| "text": "(Baldwin and Kim, 2010;", |
| "ref_id": "BIBREF1" |
| }, |
| { |
| "start": 135, |
| "end": 154, |
| "text": "Fazly et al., 2009)", |
| "ref_id": "BIBREF5" |
| }, |
| { |
| "start": 363, |
| "end": 384, |
| "text": "(Devlin et al., 2019)", |
| "ref_id": "BIBREF4" |
| }, |
| { |
| "start": 562, |
| "end": 588, |
| "text": "(Constant and Nivre, 2016)", |
| "ref_id": "BIBREF2" |
| }, |
| { |
| "start": 846, |
| "end": 858, |
| "text": "(Rush, 2020)", |
| "ref_id": "BIBREF15" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "In earlier systems, MWEs were extracted using pre-defined patterns or statistical measures that either indicated associations among MWE components or (non-)compositionality of the expressions with regard to the components (Ramisch et al., 2010) . For example, Cordeiro et al. (2016) employed such a This work is licensed under a Creative Commons Attribution 4.0 International Licence. Licence details: http:// creativecommons.org/licenses/by/4.0/.", |
| "cite_spans": [ |
| { |
| "start": 222, |
| "end": 244, |
| "text": "(Ramisch et al., 2010)", |
| "ref_id": "BIBREF11" |
| }, |
| { |
| "start": 260, |
| "end": 282, |
| "text": "Cordeiro et al. (2016)", |
| "ref_id": "BIBREF3" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Related Work", |
| "sec_num": "2" |
| }, |
| { |
| "text": "1 http://hdl.handle.net/11234/1-3367 2 The code for the system and configuration files for different languages are available at https://github.com/ shivaat/MTLB-STRUCT/ system for identifying MWEs. While these models can be effective for some frequent MWEs, their main disadvantage is that they capture MWE types (as opposed to tokens) and they are unable to take context into account in running texts.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Related Work", |
| "sec_num": "2" |
| }, |
| { |
| "text": "The use of supervised machine learning was facilitated by the availability of resources tagged for MWEs (Schneider et al., 2014; Savary et al., 2017; . Al Saied et al. 2017proposed a transition-based system based on an arc-standard dependency parser (Nivre, 2004) which ranked first in the first edition of PARSEME shared task on automatic identification of verbal MWEs (VMWEs) (Savary et al., 2017) . Taslimipoor and Rohanian (2018) proposed a CNN-LSTM system which exploited fastText word representations and ranked first in the open track of the PARSEME shared task edition 1.1 . Previous systems such as TRAVERSAL (Waszczuk, 2018) (ranked first in the closed track of the PARSEME shared task edition 1.1), and CRF-Seq/Dep (Moreau et al., 2018) employed tree CRF using dependency parse features in non-deep learning settings. They showed strengths of this approach particularly in the case of discontinuous VMWEs. In SHOMA (Taslimipoor and Rohanian, 2018) , using a linear-chain CRF layer on top of the CNN-biLSTM model did not result in improvements. In this work, we use tree CRF, implemented as part of the Torch-Struct library (Rush, 2020) , to model dependency trees, and we show that when it is jointly trained with a transformer-based MWE detection system, it improves MWE prediction for a number of languages.", |
| "cite_spans": [ |
| { |
| "start": 104, |
| "end": 128, |
| "text": "(Schneider et al., 2014;", |
| "ref_id": "BIBREF19" |
| }, |
| { |
| "start": 129, |
| "end": 149, |
| "text": "Savary et al., 2017;", |
| "ref_id": "BIBREF17" |
| }, |
| { |
| "start": 250, |
| "end": 263, |
| "text": "(Nivre, 2004)", |
| "ref_id": "BIBREF7" |
| }, |
| { |
| "start": 378, |
| "end": 399, |
| "text": "(Savary et al., 2017)", |
| "ref_id": "BIBREF17" |
| }, |
| { |
| "start": 402, |
| "end": 433, |
| "text": "Taslimipoor and Rohanian (2018)", |
| "ref_id": "BIBREF20" |
| }, |
| { |
| "start": 618, |
| "end": 634, |
| "text": "(Waszczuk, 2018)", |
| "ref_id": "BIBREF22" |
| }, |
| { |
| "start": 726, |
| "end": 747, |
| "text": "(Moreau et al., 2018)", |
| "ref_id": "BIBREF6" |
| }, |
| { |
| "start": 926, |
| "end": 958, |
| "text": "(Taslimipoor and Rohanian, 2018)", |
| "ref_id": "BIBREF20" |
| }, |
| { |
| "start": 1134, |
| "end": 1146, |
| "text": "(Rush, 2020)", |
| "ref_id": "BIBREF15" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Related Work", |
| "sec_num": "2" |
| }, |
| { |
| "text": "Recently, Savary et al. (2019) proposed that learning MWE lexicons in an unsupervised setting is an important step that can be used in combination with a supervised model, especially when the latter is trained on a small amount of data. While we do not specifically learn MWE lexicons from external unannotated data, we believe that state-of-the-art pre-trained language representation models can capture crucial information about MWEs similar to other NLP phenomena (Peters et al., 2017) . For instance, Peters et al. 2017showed how a semi-supervised system may benefit from pre-trained language model-based embeddings for named entity recognition (NER) and chunking. The joint learning of MWEs and dependency parsing has been proved effective in Constant and Nivre (2016) . They proposed an arc-standard transition-based system which draws on a new representation that has two linguistic layers (a syntactic dependency tree and a forest of MWEs) sharing lexical nodes. The closest to our work is where they have trained a multi-task neural network which jointly learns VMWEs and dependency parsing on a small English dataset and uses ELMo pre-trained embeddings. Our work here is different in that we fine-tune the BERT architecture and we use a tree CRF for dependency parsing.", |
| "cite_spans": [ |
| { |
| "start": 10, |
| "end": 30, |
| "text": "Savary et al. (2019)", |
| "ref_id": "BIBREF18" |
| }, |
| { |
| "start": 467, |
| "end": 488, |
| "text": "(Peters et al., 2017)", |
| "ref_id": "BIBREF9" |
| }, |
| { |
| "start": 748, |
| "end": 773, |
| "text": "Constant and Nivre (2016)", |
| "ref_id": "BIBREF2" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Related Work", |
| "sec_num": "2" |
| }, |
| { |
| "text": "We use pre-trained BERT for language representation (Devlin et al., 2019) as the basis for our neural network. The BERT architecture is based on standard transformers involving self-attention layers of encoders and decoders. 3 What makes it different from other transformer-based pre-trained language representation models is its capability in encoding the representation in a bidirectional way through a masked language model schema. The reason that we choose BERT among other pre-trained models is the availability of multi-lingual pre-trained BERT. 4 Our model is set up to learn MWEs and dependency trees simultaneously. BERT weights are shared among the two tasks. A fully connected layer that performs sequence tagging is added as the final layer for MWE objective. Parallel to that, linear layers and a dependency CRF module are introduced to perform structured prediction for dependency trees. 5 The whole model is trained in an end-to-end manner. Figure 1 depicts the overall architecture of the system.", |
| "cite_spans": [ |
| { |
| "start": 52, |
| "end": 73, |
| "text": "(Devlin et al., 2019)", |
| "ref_id": "BIBREF4" |
| }, |
| { |
| "start": 225, |
| "end": 226, |
| "text": "3", |
| "ref_id": null |
| }, |
| { |
| "start": 552, |
| "end": 553, |
| "text": "4", |
| "ref_id": null |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 956, |
| "end": 964, |
| "text": "Figure 1", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "System Description", |
| "sec_num": "3" |
| }, |
| { |
| "text": "We use Torch-Struct (Rush, 2020) for dependency parsing where Tree CRF is implemented as a distribution object. We first apply a linear followed by a bilinear layer on BERT's output to obtain the adjacency matrix structure of the dependency tree. The outputs from these layers are considered as logpotentials (l) for the CRF distribution. The distribution takes in log-potentials and converts them into", |
| "cite_spans": [ |
| { |
| "start": 20, |
| "end": 32, |
| "text": "(Rush, 2020)", |
| "ref_id": "BIBREF15" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "System Description", |
| "sec_num": "3" |
| }, |
| { |
| "text": "w 1 w 2 w 3 w n \u2026 BERT MWE tagger Tree CRF dependency tagger T 1 T 2 T n softmax :", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "System Description", |
| "sec_num": "3" |
| }, |
| { |
| "text": "Bilinear node Figure 1 : The overall architecture of the multi-task learning model with two branches on top of BERT. One is a linear classifier layer for MWE tagging and the other consists of a linear layer, a bilinear layer and a tree CRF dependency tagger.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 14, |
| "end": 22, |
| "text": "Figure 1", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "System Description", |
| "sec_num": "3" |
| }, |
| { |
| "text": "probabilities CRF (z; l) of a specific tree z. We query the distribution to predict over the set of trees using argmax z CRF (z; l). The cost for updating the tree is based on the difference between the tree probability and the gold standard dependency arcs.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "System Description", |
| "sec_num": "3" |
| }, |
| { |
| "text": "The MWE classification layer is optimised by cross-entropy between the ground truth MWE tags and the predicted ones, while the cost for CRF is estimated using log probabilities over the tree structures. Note that log probabilities (logprobs) for CRF are large negative values which should be maximised, so we multiply them by \u22121 to get the dependency loss values compatible with MWE ones: Loss dep = \u2212logprobs. The overall loss function to be optimised by ADAM optimiser is a linear combination of the two losses, Loss mwe and Loss dep which are the losses for multi-word expression and dependency parse tree, respectively, with \u03b1 being a constant value which is empirically set to 0.001 \u2264 \u03b1 \u2264 0.01.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "System Description", |
| "sec_num": "3" |
| }, |
| { |
| "text": "Loss = Loss mwe + \u03b1 * Loss dep (1)", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "System Description", |
| "sec_num": "3" |
| }, |
| { |
| "text": "We adapted the sequential labelling scheme of Rohanian et al. 2019which is similar to IOB with the difference that it introduces a new soft label o-for the tokens that are in between components of an MWE. We preserved MWE categories by suffixing the label with the category name. In this case, the annotations for the idiomatic verbal expression (of type VID) in the sentence I would give this job a go, would be:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Experiments", |
| "sec_num": "4" |
| }, |
| { |
| "text": "I [O] would [O] give [B-VID] this [o-VID] job [o-VID] a [I-VID] go [I-VID]", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Experiments", |
| "sec_num": "4" |
| }, |
| { |
| "text": "with the labels shown as subscripts in brackets. 6 In the development phase of the shared task, we trained various configurations of our system and evaluated the performance on development sets. Specifically, we examined the performance of our model in two settings: (1) the model is back-propagated only based on Loss mwe (single-task), and (2) the learning is based on the multi-task Loss (multi-task). We decided on the setting to be used for each language separately based on the performance on development sets. We used bert-base-multilingualcased as the pre-trained model for all languages. 7 Due to lack of time and resources, we did not perform any extensive hyper-parameter search. We empirically chose learning rate 3 \u00d7 10 \u22125 and batch size 10 (except for GA where the selected batch size is 1). We trained the models for 10 epochs, and the maximum lengths of sentences for training were chosen for each language separately based on the word piece tokenisation of multilingual BERT. 8 Table 1 shows results on the development sets. According to the shared task criteria, we report MWEbased precision, recall and F1 measures for all VMWEs and unseen ones in particular. We also consider the scores on the expressions which are syntactic variants of their occurrences in the training data useful to be reported. We chose the best setting for each language based on F1 scores on unseen VMWEs (in bold). The systems marked by * (in Table 1 ) are trained after the evaluation period; therefore, their scores on test are not available in the official evaluation report. In the multi-task setting we tried two \u03b1 values: for DE, EU, FR, GA, HE, HI, IT, PL, PT and ZH). The best model for each language was trained on both train and dev sets. The results obtained on test data are reported in Section 5. After the evaluation period, we also fine-tuned the dependency-CRF branch of the model on some portions of extra data for several lower-resource languages (e.g. 
GA, HI, HE and TR). We saw no notable improvement except for TR as reported in Table 1 (multi-taks + extra data). We only fine-tuned the model to learn unlabeled trees for dependency arcs, which are made available for additional data as part of the shared task. Due to being limited by the amount of computational power, we only partially used the extra unannotated data; therefore we leave the experiments on their effects to future work. Table 2 shows the summary results of our system MTLB-STRUCT on test sets. For each language, we report the employed system (single or multi-task), the ratio of unseen data in the test set, global and unseen MWE-based F1 scores, and finally the system's rank (#) in the open track of the shared task. 9", |
| "cite_spans": [ |
| { |
| "start": 49, |
| "end": 50, |
| "text": "6", |
| "ref_id": null |
| }, |
| { |
| "start": 2664, |
| "end": 2667, |
| "text": "(#)", |
| "ref_id": null |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 995, |
| "end": 1002, |
| "text": "Table 1", |
| "ref_id": "TABREF1" |
| }, |
| { |
| "start": 1438, |
| "end": 1445, |
| "text": "Table 1", |
| "ref_id": "TABREF1" |
| }, |
| { |
| "start": 2045, |
| "end": 2052, |
| "text": "Table 1", |
| "ref_id": "TABREF1" |
| }, |
| { |
| "start": 2406, |
| "end": 2413, |
| "text": "Table 2", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Experiments", |
| "sec_num": "4" |
| }, |
| { |
| "text": "Global Unseen Table 2 : The percentage of unseen expressions (unseen %), and Global and Unseen MWE-based F1 results for all languages (Lang) in test. Column # indicates the ranking of our system in the shared task.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 14, |
| "end": 21, |
| "text": "Table 2", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Lang System", |
| "sec_num": null |
| }, |
| { |
| "text": "Lang", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Lang System", |
| "sec_num": null |
| }, |
| { |
| "text": "Our system is applied to all 14 languages and achieves the highest F1 score overall. The amount of MWEs seen in the training data is the largest contributing factor, as the percentage of seen-in-train gold MWEs is highly linearly correlated (r = 0.90) with the global MWE-based F1 score across the languages. We achieve the highest performance in terms of MWE-based F1 score on unseen data for 8 out of 14 languages, with the largest gaps in performance observed on PT, where our system outperforms Seen2Unseen by 21.59 points, and on HI, where the gap between our system's F1 and that of Seen2Unseen equals 10.45 points. We note that our system works significantly better than the second best systems for smaller datasets (GA, HE, and HI) which also happen to have larger amount of unseen expressions. At the same time, TRAVIS-mono outperforms our system on FR, IT, PL, TR, and ZH, with the largest gap of 5.68 points observed on FR.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Lang System", |
| "sec_num": null |
| }, |
| { |
| "text": "In addition, our system's performance is balanced across continuous and discontinuous MWEs, with the exceptions of HI and TR, where discontinuous MWEs amount to 7% and 4% of all MWEs, respectively, and our system's performance drops by as much as 30 F1 points compared to its performance on continuous MWEs. The distinction between multiand single-token MWEs is only applicable to 3 languages, on two of which (DE and SV) our system achieves an F1 score above 0.80 on single tokens. Finally, the shared task data shows a wide diversity of VMWE categories present in different languages: from just three in EU and TR up to eight in IT. Once again, we note that our system is applicable to detection of all categories: for instance, it achieves the highest F1 scores among all systems in identification of LS.ICV, a rare language-specific category of inherently clitic verbs used only in Italian. At the same time, we identify LVC.cause, light-verb constructions with the verb adding a causative meaning to the noun, as the most problematic category on which our system achieves comparatively poorer results, especially on DE, EL, FR, HI, PT, and SV.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Lang System", |
| "sec_num": null |
| }, |
| { |
| "text": "It is worth noting that no language specific feature is used in our system and the authors were not involved in the creation of the datatsets. Overall, we note that our system is not only cross-lingual, but also robust in terms of its performance and is capable of generalising to unseen MWEs.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Lang System", |
| "sec_num": null |
| }, |
| { |
| "text": "We described MTLB-STRUCT, a semi-supervised system that is based on pre-trained BERT masked language modelling and that jointly learns VMWE tags and dependency parse trees. The system ranked first in the open track of the PARSEME shared task -edition 1.2 and shows the overall state-of-theart performance for detecting unseen VMWEs. In future, we plan to augment the dependency parsing architecture to train on dependency relation categories (labels) as well as dependency arcs. We also plan to improve our system by making it more efficient in order to train the dependency parsing module on the extra available unannotated datasets.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Conclusions and Future Work", |
| "sec_num": "6" |
| }, |
| { |
| "text": "There are 12 layers (transformer blocks) following the implementation of http://nlp.seas.harvard.edu/2018/ 04/03/attention.html, with the hidden dimension size of 768 and 12 attention heads.4 https://huggingface.co/bert-base-multilingual-cased 5 In this work, we only focus on dependency arcs (tree structures) and we do not model dependency relation labels.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| }, |
| { |
| "text": "Embedded MWEs can be detected only if the nested MWE is not part of the nesting one and their categories are different.7 We tried uncased multilingual models, for FR and PL in particular, but we didn't observe any improvements. 8 When tokenisation splits words into multiple pieces, we took the prediction for the first piece as the prediction for the word. We masked the rest in the learning process.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| }, |
| { |
| "text": "More detailed results (including precision and recall values, and token-based performance measures) are available on the shared task web page: http://multiword.sourceforge.net/PHITE.php?sitesig=CONF&page=CONF_02_ MWE-LEX_2020___lb__COLING__rb__&subpage=CONF_50_Shared_task_results", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| } |
| ], |
| "back_matter": [ |
| { |
| "text": "This paper reports on research supported by Cambridge Assessment, University of Cambridge. We are grateful to the anonymous reviewers for their valuable feedback. We gratefully acknowledge the support of NVIDIA Corporation with the donation of the Titan V GPU used in this research.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Acknowledgments", |
| "sec_num": null |
| } |
| ], |
| "bib_entries": { |
| "BIBREF0": { |
| "ref_id": "b0", |
| "title": "The ATILF-LLF system for PARSEME shared task: a transition-based verbal multiword expression tagger", |
| "authors": [ |
| { |
| "first": "Al", |
| "middle": [], |
| "last": "Hazem", |
| "suffix": "" |
| }, |
| { |
| "first": "Matthieu", |
| "middle": [], |
| "last": "Saied", |
| "suffix": "" |
| }, |
| { |
| "first": "Marie", |
| "middle": [], |
| "last": "Constant", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Candito", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "Proceedings of the 13th Workshop on Multiword Expressions", |
| "volume": "", |
| "issue": "", |
| "pages": "127--132", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Hazem Al Saied, Matthieu Constant, and Marie Candito. 2017. The ATILF-LLF system for PARSEME shared task: a transition-based verbal multiword expression tagger. In Proceedings of the 13th Workshop on Multiword Expressions (MWE 2017), pages 127-132.", |
| "links": null |
| }, |
| "BIBREF1": { |
| "ref_id": "b1", |
| "title": "Multiword expressions", |
| "authors": [ |
| { |
| "first": "Timothy", |
| "middle": [], |
| "last": "Baldwin", |
| "suffix": "" |
| }, |
| { |
| "first": "Su", |
| "middle": [ |
| "Nam" |
| ], |
| "last": "Kim", |
| "suffix": "" |
| } |
| ], |
| "year": 2010, |
| "venue": "Handbook of Natural Language Processing", |
| "volume": "", |
| "issue": "", |
| "pages": "267--292", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Timothy Baldwin and Su Nam Kim. 2010. Multiword expressions. In Handbook of Natural Language Processing, second edition., pages 267-292. CRC Press.", |
| "links": null |
| }, |
| "BIBREF2": { |
| "ref_id": "b2", |
| "title": "A transition-based system for joint lexical and syntactic analysis", |
| "authors": [ |
| { |
| "first": "Matthieu", |
| "middle": [], |
| "last": "Constant", |
| "suffix": "" |
| }, |
| { |
| "first": "Joakim", |
| "middle": [], |
| "last": "Nivre", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "Proceedings of the 54th Annual Meeting of the Association for Computational Linguistics", |
| "volume": "1", |
| "issue": "", |
| "pages": "161--171", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Matthieu Constant and Joakim Nivre. 2016. A transition-based system for joint lexical and syntactic analysis. In Proceedings of the 54th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pages 161-171.", |
| "links": null |
| }, |
| "BIBREF3": { |
| "ref_id": "b3", |
| "title": "UFRGS&LIF at SemEval-2016 task 10: rulebased mwe identification and predominant-supersense tagging", |
| "authors": [ |
| { |
| "first": "Silvio", |
| "middle": [], |
| "last": "Cordeiro", |
| "suffix": "" |
| }, |
| { |
| "first": "Carlos", |
| "middle": [], |
| "last": "Ramisch", |
| "suffix": "" |
| }, |
| { |
| "first": "Aline", |
| "middle": [], |
| "last": "Villavicencio", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "Proceedings of the 10th International Workshop on Semantic Evaluation (SemEval-2016)", |
| "volume": "", |
| "issue": "", |
| "pages": "910--917", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Silvio Cordeiro, Carlos Ramisch, and Aline Villavicencio. 2016. UFRGS&LIF at SemEval-2016 task 10: rule- based mwe identification and predominant-supersense tagging. In Proceedings of the 10th International Work- shop on Semantic Evaluation (SemEval-2016), pages 910-917.", |
| "links": null |
| }, |
| "BIBREF4": { |
| "ref_id": "b4", |
| "title": "BERT: Pre-training of deep bidirectional transformers for language understanding", |
| "authors": [ |
| { |
| "first": "Jacob", |
| "middle": [], |
| "last": "Devlin", |
| "suffix": "" |
| }, |
| { |
| "first": "Ming-Wei", |
| "middle": [], |
| "last": "Chang", |
| "suffix": "" |
| }, |
| { |
| "first": "Kenton", |
| "middle": [], |
| "last": "Lee", |
| "suffix": "" |
| }, |
| { |
| "first": "Kristina", |
| "middle": [], |
| "last": "Toutanova", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "Proceedings of the 2019 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies", |
| "volume": "1", |
| "issue": "", |
| "pages": "4171--4186", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Jacob Devlin, Ming-Wei Chang, Kenton Lee, and Kristina Toutanova. 2019. BERT: Pre-training of deep bidirec- tional transformers for language understanding. In Proceedings of the 2019 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, Volume 1 (Long and Short Papers), pages 4171-4186, Minneapolis, Minnesota, June. Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF5": { |
| "ref_id": "b5", |
| "title": "Unsupervised type and token identification of idiomatic expressions", |
| "authors": [ |
| { |
| "first": "Afsaneh", |
| "middle": [], |
| "last": "Fazly", |
| "suffix": "" |
| }, |
| { |
| "first": "Paul", |
| "middle": [], |
| "last": "Cook", |
| "suffix": "" |
| }, |
| { |
| "first": "Suzanne", |
| "middle": [], |
| "last": "Stevenson", |
| "suffix": "" |
| } |
| ], |
| "year": 2009, |
| "venue": "Computational Linguistics", |
| "volume": "35", |
| "issue": "1", |
| "pages": "61--103", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Afsaneh Fazly, Paul Cook, and Suzanne Stevenson. 2009. Unsupervised type and token identification of idiomatic expressions. Computational Linguistics, 35(1):61-103.", |
| "links": null |
| }, |
| "BIBREF6": { |
| "ref_id": "b6", |
| "title": "CRF-Seq and CRF-DepTree at PARSEME shared task 2018: Detecting verbal mwes using sequential and dependency-based approaches", |
| "authors": [ |
| { |
| "first": "Erwan", |
| "middle": [], |
| "last": "Moreau", |
| "suffix": "" |
| }, |
| { |
| "first": "Ashjan", |
| "middle": [], |
| "last": "Alsulaimani", |
| "suffix": "" |
| }, |
| { |
| "first": "Alfredo", |
| "middle": [], |
| "last": "Maldonado", |
| "suffix": "" |
| }, |
| { |
| "first": "Carl", |
| "middle": [], |
| "last": "Vogel", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "Proceedings of the Joint Workshop on Linguistic Annotation, Multiword Expressions and Constructions (LAW-MWE-CxG-2018)", |
| "volume": "", |
| "issue": "", |
| "pages": "241--247", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Erwan Moreau, Ashjan Alsulaimani, Alfredo Maldonado, and Carl Vogel. 2018. CRF-Seq and CRF-DepTree at PARSEME shared task 2018: Detecting verbal mwes using sequential and dependency-based approaches. In Proceedings of the Joint Workshop on Linguistic Annotation, Multiword Expressions and Constructions (LAW- MWE-CxG-2018), pages 241-247.", |
| "links": null |
| }, |
| "BIBREF7": { |
| "ref_id": "b7", |
| "title": "Incrementality in deterministic dependency parsing", |
| "authors": [ |
| { |
| "first": "Joakim", |
| "middle": [], |
| "last": "Nivre", |
| "suffix": "" |
| } |
| ], |
| "year": 2004, |
| "venue": "Proceedings of the workshop on incremental parsing: Bringing engineering and cognition together", |
| "volume": "", |
| "issue": "", |
| "pages": "50--57", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Joakim Nivre. 2004. Incrementality in deterministic dependency parsing. In Proceedings of the workshop on incremental parsing: Bringing engineering and cognition together, pages 50-57.", |
| "links": null |
| }, |
| "BIBREF8": { |
| "ref_id": "b8", |
| "title": "Towards a variability measure for multiword expressions", |
| "authors": [ |
| { |
| "first": "Caroline", |
| "middle": [], |
| "last": "Pasquer", |
| "suffix": "" |
| }, |
| { |
| "first": "Agata", |
| "middle": [], |
| "last": "Savary", |
| "suffix": "" |
| }, |
| { |
| "first": "Jean-Yves", |
| "middle": [], |
| "last": "Antoine", |
| "suffix": "" |
| }, |
| { |
| "first": "Carlos", |
| "middle": [], |
| "last": "Ramisch", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "Proceedings of the 2018 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies", |
| "volume": "2", |
| "issue": "", |
| "pages": "426--432", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Caroline Pasquer, Agata Savary, Jean-Yves Antoine, and Carlos Ramisch. 2018. Towards a variability measure for multiword expressions. In Proceedings of the 2018 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, Volume 2 (Short Papers), pages 426-432, New Orleans, Louisiana, June. Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF9": { |
| "ref_id": "b9", |
| "title": "Semi-supervised sequence tagging with bidirectional language models", |
| "authors": [ |
| { |
| "first": "Matthew", |
| "middle": [], |
| "last": "Peters", |
| "suffix": "" |
| }, |
| { |
| "first": "Waleed", |
| "middle": [], |
| "last": "Ammar", |
| "suffix": "" |
| }, |
| { |
| "first": "Chandra", |
| "middle": [], |
| "last": "Bhagavatula", |
| "suffix": "" |
| }, |
| { |
| "first": "Russell", |
| "middle": [], |
| "last": "Power", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "Proceedings of the 55th Annual Meeting of the Association for Computational Linguistics", |
| "volume": "1", |
| "issue": "", |
| "pages": "1756--1765", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Matthew Peters, Waleed Ammar, Chandra Bhagavatula, and Russell Power. 2017. Semi-supervised sequence tagging with bidirectional language models. In Proceedings of the 55th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pages 1756-1765.", |
| "links": null |
| }, |
| "BIBREF10": { |
| "ref_id": "b10", |
| "title": "How multilingual is multilingual bert", |
| "authors": [ |
| { |
| "first": "Telmo", |
| "middle": [], |
| "last": "Pires", |
| "suffix": "" |
| }, |
| { |
| "first": "Eva", |
| "middle": [], |
| "last": "Schlinger", |
| "suffix": "" |
| }, |
| { |
| "first": "Dan", |
| "middle": [], |
| "last": "Garrette", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXiv:1906.01502" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Telmo Pires, Eva Schlinger, and Dan Garrette. 2019. How multilingual is multilingual bert? arXiv preprint arXiv:1906.01502.", |
| "links": null |
| }, |
| "BIBREF11": { |
| "ref_id": "b11", |
| "title": "mwetoolkit: a framework for multiword expression identification", |
| "authors": [ |
| { |
| "first": "Carlos", |
| "middle": [], |
| "last": "Ramisch", |
| "suffix": "" |
| }, |
| { |
| "first": "Aline", |
| "middle": [], |
| "last": "Villavicencio", |
| "suffix": "" |
| }, |
| { |
| "first": "Christian", |
| "middle": [], |
| "last": "Boitet", |
| "suffix": "" |
| } |
| ], |
| "year": 2010, |
| "venue": "Proceedings of the Seventh International Conference on Language Resources and Evaluation (LREC'10)", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Carlos Ramisch, Aline Villavicencio, and Christian Boitet. 2010. mwetoolkit: a framework for multiword ex- pression identification. In Proceedings of the Seventh International Conference on Language Resources and Evaluation (LREC'10).", |
| "links": null |
| }, |
| "BIBREF12": { |
| "ref_id": "b12", |
| "title": "Edition 1.1 of the PARSEME shared task on automatic identification of verbal multiword expressions", |
| "authors": [ |
| { |
| "first": "Carlos", |
| "middle": [], |
| "last": "Ramisch", |
| "suffix": "" |
| }, |
| { |
| "first": "Silvio", |
| "middle": [], |
| "last": "Cordeiro", |
| "suffix": "" |
| }, |
| { |
| "first": "Agata", |
| "middle": [], |
| "last": "Savary", |
| "suffix": "" |
| }, |
| { |
| "first": "Veronika", |
| "middle": [], |
| "last": "Vincze", |
| "suffix": "" |
| }, |
| { |
| "first": "Verginica", |
| "middle": [], |
| "last": "Mititelu", |
| "suffix": "" |
| }, |
| { |
| "first": "Archna", |
| "middle": [], |
| "last": "Bhatia", |
| "suffix": "" |
| }, |
| { |
| "first": "Maja", |
| "middle": [], |
| "last": "Buljan", |
| "suffix": "" |
| }, |
| { |
| "first": "Marie", |
| "middle": [], |
| "last": "Candito", |
| "suffix": "" |
| }, |
| { |
| "first": "Polona", |
| "middle": [], |
| "last": "Gantar", |
| "suffix": "" |
| }, |
| { |
| "first": "Voula", |
| "middle": [], |
| "last": "Giouli", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "the Joint Workshop on Linguistic Annotation, Multiword Expressions and Constructions", |
| "volume": "", |
| "issue": "", |
| "pages": "222--240", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Carlos Ramisch, Silvio Cordeiro, Agata Savary, Veronika Vincze, Verginica Mititelu, Archna Bhatia, Maja Buljan, Marie Candito, Polona Gantar, Voula Giouli, et al. 2018. Edition 1.1 of the PARSEME shared task on automatic identification of verbal multiword expressions. In the Joint Workshop on Linguistic Annotation, Multiword Expressions and Constructions (LAW-MWE-CxG-2018), pages 222-240.", |
| "links": null |
| }, |
| "BIBREF13": { |
| "ref_id": "b13", |
| "title": "Semi-supervised multitask learning for sequence labeling", |
| "authors": [ |
| { |
| "first": "Marek", |
| "middle": [], |
| "last": "Rei", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "Proceedings of the 55th Annual Meeting of the Association for Computational Linguistics", |
| "volume": "1", |
| "issue": "", |
| "pages": "2121--2130", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Marek Rei. 2017. Semi-supervised multitask learning for sequence labeling. In Proceedings of the 55th An- nual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pages 2121-2130, Vancouver, Canada, July. Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF14": { |
| "ref_id": "b14", |
| "title": "Bridging the gap: Attending to discontinuity in identification of multiword expressions", |
| "authors": [ |
| { |
| "first": "Omid", |
| "middle": [], |
| "last": "Rohanian", |
| "suffix": "" |
| }, |
| { |
| "first": "Shiva", |
| "middle": [], |
| "last": "Taslimipoor", |
| "suffix": "" |
| }, |
| { |
| "first": "Samaneh", |
| "middle": [], |
| "last": "Kouchaki", |
| "suffix": "" |
| }, |
| { |
| "first": "Le", |
| "middle": [ |
| "An" |
| ], |
| "last": "Ha", |
| "suffix": "" |
| }, |
| { |
| "first": "Ruslan", |
| "middle": [], |
| "last": "Mitkov", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXiv:1902.10667" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Omid Rohanian, Shiva Taslimipoor, Samaneh Kouchaki, Le An Ha, and Ruslan Mitkov. 2019. Bridging the gap: Attending to discontinuity in identification of multiword expressions. arXiv preprint arXiv:1902.10667.", |
| "links": null |
| }, |
| "BIBREF15": { |
| "ref_id": "b15", |
| "title": "Torch-Struct: Deep structured prediction library", |
| "authors": [ |
| { |
| "first": "Alexander", |
| "middle": [], |
| "last": "Rush", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics: System Demonstrations", |
| "volume": "", |
| "issue": "", |
| "pages": "335--342", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Alexander Rush. 2020. Torch-Struct: Deep structured prediction library. In Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics: System Demonstrations, pages 335-342, Online, July. Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF16": { |
| "ref_id": "b16", |
| "title": "Detecting non-compositional MWE components using Wiktionary", |
| "authors": [ |
| { |
| "first": "Bahar", |
| "middle": [], |
| "last": "Salehi", |
| "suffix": "" |
| }, |
| { |
| "first": "Paul", |
| "middle": [], |
| "last": "Cook", |
| "suffix": "" |
| }, |
| { |
| "first": "Timothy", |
| "middle": [], |
| "last": "Baldwin", |
| "suffix": "" |
| } |
| ], |
| "year": 2014, |
| "venue": "Proceedings of the 2014 Conference on Empirical Methods in Natural Language Processing (EMNLP)", |
| "volume": "", |
| "issue": "", |
| "pages": "1792--1797", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Bahar Salehi, Paul Cook, and Timothy Baldwin. 2014. Detecting non-compositional MWE components using Wiktionary. In Proceedings of the 2014 Conference on Empirical Methods in Natural Language Processing (EMNLP), pages 1792-1797, Doha, Qatar, October. Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF17": { |
| "ref_id": "b17", |
| "title": "The PARSEME shared task on automatic identification of verbal multiword expressions", |
| "authors": [ |
| { |
| "first": "Agata", |
| "middle": [], |
| "last": "Savary", |
| "suffix": "" |
| }, |
| { |
| "first": "Carlos", |
| "middle": [], |
| "last": "Ramisch", |
| "suffix": "" |
| }, |
| { |
| "first": "Silvio", |
| "middle": [], |
| "last": "Cordeiro", |
| "suffix": "" |
| }, |
| { |
| "first": "Federico", |
| "middle": [], |
| "last": "Sangati", |
| "suffix": "" |
| }, |
| { |
| "first": "Veronika", |
| "middle": [], |
| "last": "Vincze", |
| "suffix": "" |
| }, |
| { |
| "first": "Behrang", |
| "middle": [], |
| "last": "Qasemizadeh", |
| "suffix": "" |
| }, |
| { |
| "first": "Marie", |
| "middle": [], |
| "last": "Candito", |
| "suffix": "" |
| }, |
| { |
| "first": "Fabienne", |
| "middle": [], |
| "last": "Cap", |
| "suffix": "" |
| }, |
| { |
| "first": "Voula", |
| "middle": [], |
| "last": "Giouli", |
| "suffix": "" |
| }, |
| { |
| "first": "Ivelina", |
| "middle": [], |
| "last": "Stoyanova", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "Proceedings of the 13th Workshop on Multiword Expressions", |
| "volume": "", |
| "issue": "", |
| "pages": "31--47", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Agata Savary, Carlos Ramisch, Silvio Cordeiro, Federico Sangati, Veronika Vincze, Behrang Qasemizadeh, Marie Candito, Fabienne Cap, Voula Giouli, Ivelina Stoyanova, et al. 2017. The PARSEME shared task on automatic identification of verbal multiword expressions. In Proceedings of the 13th Workshop on Multiword Expressions (MWE 2017), pages 31-47.", |
| "links": null |
| }, |
| "BIBREF18": { |
| "ref_id": "b18", |
| "title": "Without lexicons, multiword expression identification will never fly: A position statement", |
| "authors": [ |
| { |
| "first": "Agata", |
| "middle": [], |
| "last": "Savary", |
| "suffix": "" |
| }, |
| { |
| "first": "Silvio", |
| "middle": [ |
| "Ricardo" |
| ], |
| "last": "Cordeiro", |
| "suffix": "" |
| }, |
| { |
| "first": "Carlos", |
| "middle": [], |
| "last": "Ramisch", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "Joint Workshop on Multiword Expressions and WordNet (MWE-WN 2019)", |
| "volume": "", |
| "issue": "", |
| "pages": "79--91", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Agata Savary, Silvio Ricardo Cordeiro, and Carlos Ramisch. 2019. Without lexicons, multiword expression identification will never fly: A position statement. In Joint Workshop on Multiword Expressions and WordNet (MWE-WN 2019), pages 79-91. Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF19": { |
| "ref_id": "b19", |
| "title": "Discriminative lexical semantic segmentation with gaps: running the mwe gamut", |
| "authors": [ |
| { |
| "first": "Nathan", |
| "middle": [], |
| "last": "Schneider", |
| "suffix": "" |
| }, |
| { |
| "first": "Emily", |
| "middle": [], |
| "last": "Danchik", |
| "suffix": "" |
| }, |
| { |
| "first": "Chris", |
| "middle": [], |
| "last": "Dyer", |
| "suffix": "" |
| }, |
| { |
| "first": "Noah A", |
| "middle": [], |
| "last": "Smith", |
| "suffix": "" |
| } |
| ], |
| "year": 2014, |
| "venue": "Transactions of the Association for Computational Linguistics", |
| "volume": "2", |
| "issue": "", |
| "pages": "193--206", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Nathan Schneider, Emily Danchik, Chris Dyer, and Noah A Smith. 2014. Discriminative lexical semantic seg- mentation with gaps: running the mwe gamut. Transactions of the Association for Computational Linguistics, 2:193-206.", |
| "links": null |
| }, |
| "BIBREF20": { |
| "ref_id": "b20", |
| "title": "SHOMA at Parseme shared task on automatic identification of VMWEs: Neural multiword expression tagging with high generalisation", |
| "authors": [ |
| { |
| "first": "Shiva", |
| "middle": [], |
| "last": "Taslimipoor", |
| "suffix": "" |
| }, |
| { |
| "first": "Omid", |
| "middle": [], |
| "last": "Rohanian", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXiv:1809.03056" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Shiva Taslimipoor and Omid Rohanian. 2018. SHOMA at Parseme shared task on automatic identification of VMWEs: Neural multiword expression tagging with high generalisation. arXiv preprint arXiv:1809.03056.", |
| "links": null |
| }, |
| "BIBREF21": { |
| "ref_id": "b21", |
| "title": "Cross-lingual transfer learning and multitask learning for capturing multiword expressions", |
| "authors": [ |
| { |
| "first": "Shiva", |
| "middle": [], |
| "last": "Taslimipoor", |
| "suffix": "" |
| }, |
| { |
| "first": "Omid", |
| "middle": [], |
| "last": "Rohanian", |
| "suffix": "" |
| }, |
| { |
| "first": "Le", |
| "middle": [ |
| "An" |
| ], |
| "last": "Ha", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "Proceedings of the Joint Workshop on Multiword Expressions and WordNet (MWE-WN 2019)", |
| "volume": "", |
| "issue": "", |
| "pages": "155--161", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Shiva Taslimipoor, Omid Rohanian, and Le An Ha. 2019. Cross-lingual transfer learning and multitask learning for capturing multiword expressions. In Proceedings of the Joint Workshop on Multiword Expressions and WordNet (MWE-WN 2019), pages 155-161.", |
| "links": null |
| }, |
| "BIBREF22": { |
| "ref_id": "b22", |
| "title": "TRAVERSAL at PARSEME shared task 2018: Identification of verbal multiword expressions using a discriminative tree-structured model", |
| "authors": [ |
| { |
| "first": "Jakub", |
| "middle": [], |
| "last": "Waszczuk", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "Proceedings of the Joint Workshop on Linguistic Annotation, Multiword Expressions and Constructions", |
| "volume": "", |
| "issue": "", |
| "pages": "275--282", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Jakub Waszczuk. 2018. TRAVERSAL at PARSEME shared task 2018: Identification of verbal multiword ex- pressions using a discriminative tree-structured model. In Proceedings of the Joint Workshop on Linguistic Annotation, Multiword Expressions and Constructions (LAW-MWE-CxG-2018), pages 275-282.", |
| "links": null |
| } |
| }, |
| "ref_entries": { |
| "TABREF1": { |
| "num": null, |
| "html": null, |
| "text": "Global, Unseen and Variant MWE-based scores on validation datasets.", |
| "type_str": "table", |
| "content": "<table><tr><td>1 300 and 1 700 . We used the value that worked best for each language ( 1 300 for EL, RO, SV and TR, and 1 700</td></tr></table>" |
| } |
| } |
| } |
| } |