| { |
| "paper_id": "2020", |
| "header": { |
| "generated_with": "S2ORC 1.0.0", |
| "date_generated": "2023-01-19T09:02:08.869781Z" |
| }, |
| "title": "From Sense to Action: A Word-Action Disambiguation Task in NLP", |
| "authors": [ |
| { |
| "first": "Shu-Kai", |
| "middle": [], |
| "last": "Hsieh", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "Graduate Institute of Linguistics National Taiwan University", |
| "location": {} |
| }, |
| "email": "shukaihsieh@ntu.edu.tw" |
| }, |
| { |
| "first": "Yu-Hsiang", |
| "middle": [], |
| "last": "Tseng", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "National Taiwan University", |
| "location": {} |
| }, |
| "email": "" |
| }, |
| { |
| "first": "Chiung-Yu", |
| "middle": [], |
| "last": "Chiang", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "National Taiwan University", |
| "location": {} |
| }, |
| "email": "cychiang@ntu.edu.tw" |
| }, |
| { |
| "first": "Richard", |
| "middle": [], |
| "last": "Lian", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "National Taiwan University", |
| "location": {} |
| }, |
| "email": "dclian@nlg.csie.ntu.edu.tw" |
| }, |
| { |
| "first": "Yong-Fu", |
| "middle": [], |
| "last": "Liao", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "National Taiwan University", |
| "location": {} |
| }, |
| "email": "" |
| }, |
| { |
| "first": "Mao-Chang", |
| "middle": [], |
| "last": "Ku", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "National Taiwan University", |
| "location": {} |
| }, |
| "email": "" |
| }, |
| { |
| "first": "Ching-Fang", |
| "middle": [], |
| "last": "Shih", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "National Taiwan University", |
| "location": {} |
| }, |
| "email": "" |
| } |
| ], |
| "year": "", |
| "venue": null, |
| "identifiers": {}, |
| "abstract": "Words are conventionalized symbols that present the function by which meaning is attached to form. The Word Sense Disambiguation, which has been taken as one of the core semantic processing tasks in the pipe-lined NLP architecture, aims to assign proper word sense to lemma form in varied contexts based on a word-sense inventory such as WordNet. However, there are some theoretical assumptions unattested from a functional linguistic point of view. This paper proposes an alternative by introducing a novel task called word action disambiguation task (WAD) concentrated on the observable pairs between words and actions. The accompanying dataset, which was manually edited and compiled, is composed of 419 multiple-choice questions. We further verified the dataset through item evaluation with human rating data, and the semantic relations among the dataset were annotated automatically. A baseline performance with an accuracy of 38.64% was also provided with BERT models and 43.18% after incorporating paradigmatic knowledge with semantic graph. We expect the proposal of the WAD task and dataset would motivate computational models to incorporate more complex aspects of human language.", |
| "pdf_parse": { |
| "paper_id": "2020", |
| "_pdf_hash": "", |
| "abstract": [ |
| { |
| "text": "Words are conventionalized symbols that present the function by which meaning is attached to form. The Word Sense Disambiguation, which has been taken as one of the core semantic processing tasks in the pipe-lined NLP architecture, aims to assign proper word sense to lemma form in varied contexts based on a word-sense inventory such as WordNet. However, there are some theoretical assumptions unattested from a functional linguistic point of view. This paper proposes an alternative by introducing a novel task called word action disambiguation task (WAD) concentrated on the observable pairs between words and actions. The accompanying dataset, which was manually edited and compiled, is composed of 419 multiple-choice questions. We further verified the dataset through item evaluation with human rating data, and the semantic relations among the dataset were annotated automatically. A baseline performance with an accuracy of 38.64% was also provided with BERT models and 43.18% after incorporating paradigmatic knowledge with semantic graph. We expect the proposal of the WAD task and dataset would motivate computational models to incorporate more complex aspects of human language.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Abstract", |
| "sec_num": null |
| } |
| ], |
| "body_text": [ |
| { |
| "text": "Due to its polysemous behavior, selecting the most appropriate sense for a word in a text has been one of the most important yet challenging NLP tasks over the years. Given a pre-defined sense inventory, computationally assigning each word in target texts with proper sense (thus Word Sense Disambiguation) is assumed to be crucial for MT, IR, QA, and other systems (Navigli, 2009) . Although the sense inventory such as WordNet has been continuously maintained and implemented cross-linguistically, the issue regarding the extent to which the sense granularity (i.e., levels of semantic specificity) in the sense inventory would be sufficient for downstream NLP tasks remains less explored.", |
| "cite_spans": [ |
| { |
| "start": 366, |
| "end": 381, |
| "text": "(Navigli, 2009)", |
| "ref_id": "BIBREF10" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "Three tacit and intertwined assumptions underlying the conventional WSD task are (1) word senses can be operationalized as discrete and distinguishable ones, (2) word senses (as included in the sense inventory) can be shared by the entire language community, and (3) WSD with the finegrained sense specification can be successfully applied to actual language data, and facilitate a wide range of downstream NLP tasks. However, the reported poor inter-annotator agreement (IAA) and low reliability of sense distinction/annotation in the task seem to falsify these assumptions and thus motivate projects like OntoNotes (Hovy et al., 2006; Cinkov\u00e1 et al., 2012) .", |
| "cite_spans": [ |
| { |
| "start": 617, |
| "end": 636, |
| "text": "(Hovy et al., 2006;", |
| "ref_id": "BIBREF7" |
| }, |
| { |
| "start": 637, |
| "end": 658, |
| "text": "Cinkov\u00e1 et al., 2012)", |
| "ref_id": "BIBREF2" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "This paper aims to serve as a first attempt to propose an alternative to the underlying assumptions from the functional and granular linguistic perspective. First, the notion of wordhood as assumed in the WSD task is not self-evident, particularly for languages whose writing systems do not provide the delimiter of a word boundary. In this aspect, word segmentation or determination is rather theory-laden and would be best regarded as the wordhood annotation rather than the preprocessing task with ground truth as conventionally taken. Second, word-meaning pairs are fluid in nature, whose granularity (in terms of the length of the word and the functions it carries) is influenced by its underlying ontology (paradigmatic dimension), surrounding context (syntagmatic dimension) and real-world application (pragmatic force). Under this view, it is hard to get a common, static, or solid 'feel of sense' among native speakers. Finally, it is still unclear regarding the relation between WSD and Natural Language Understanding (NLU). For instance, what levels of granularity of sense (from fine-grained to coarse-grained) do we need for the machine comprehension, or in what sense can we justify that WSD is a sine qua non for NLU?", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "There has been a huge amount of related work trying to grapple with the WSD-related issues by exploiting various machine learning models (Navigli, 2009) . On the resource side, in order to achieve better efficiency and performance, sense granularity in the sense inventory such as WordNet was explored and annotated in OntoNotes project (Weischedel et al., 2011; Palmer, Dang & Fellbaum, 2005) . However, the paradigm underlying the WSD task has also been questioned since (Kilgariff, 1997) , and sense discretization and enumerative view of word senses inventory that is implicitly/explicitly presume is strongly criticized as well (Pustejovsky, 1995) . Consequently, we adopt a functional linguistic approach to the linguistic units and introduce the design of a novel task, which can be regarded as an in vivo evaluation of the WSD system.", |
| "cite_spans": [ |
| { |
| "start": 137, |
| "end": 152, |
| "text": "(Navigli, 2009)", |
| "ref_id": "BIBREF10" |
| }, |
| { |
| "start": 337, |
| "end": 362, |
| "text": "(Weischedel et al., 2011;", |
| "ref_id": "BIBREF17" |
| }, |
| { |
| "start": 363, |
| "end": 393, |
| "text": "Palmer, Dang & Fellbaum, 2005)", |
| "ref_id": null |
| }, |
| { |
| "start": 473, |
| "end": 490, |
| "text": "(Kilgariff, 1997)", |
| "ref_id": null |
| }, |
| { |
| "start": 633, |
| "end": 652, |
| "text": "(Pustejovsky, 1995)", |
| "ref_id": "BIBREF12" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "In terms of language understanding, we see language as a communication device used to ask, demand, raise questions. The utterance, either in spoken or written forms, is an observable word sequence which encodes the speaker's illocutionary force, the \"combination of the illocutionary point of an utterance, and the particular presuppositions and attitudes that must accompany that point\" (Searle and Vanderveken, 1985) . In pragmatics, illocutionary force further distinguishes the following types of acts: inquiring, promising, asserting, ordering, etc. As the words which serve as the building block in the sequence are mostly polysemous, it is thus commonly and naively assumed that one core part of our NLU competence depends on the identification of the correct word sense for each word in the utterance, and the understanding is accomplished in a compositional manner. That's the basic underlying philosophy of the current WSD task. However, these senses are unobservable theoretical constructs. In the communicative context, as long as listeners can react with proper observable responses, the mechanism underlying the word sense disambiguation inside the listener's mind is only latent constructs. That is, the listeners understand the utterances when they react with proper actions against them. This leads to Davidsonian notion of action (Davidson, 1985) , that an action is something an agent does that was 'intentional under some description'. The relations among words, senses, and the Davidsonian actions with a framework is depicted in Figure 1 .", |
| "cite_spans": [ |
| { |
| "start": 388, |
| "end": 418, |
| "text": "(Searle and Vanderveken, 1985)", |
| "ref_id": "BIBREF13" |
| }, |
| { |
| "start": 1348, |
| "end": 1364, |
| "text": "(Davidson, 1985)", |
| "ref_id": null |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 1551, |
| "end": 1559, |
| "text": "Figure 1", |
| "ref_id": "FIGREF0" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "To illustrate the relationship between words and actions, we develop a novel task called word action disambiguation task (WAD) to highlight the communication and dynamic aspect of word and action. The action inevitably reduced to textual descriptions in the task in order to be efficiently processed by machines. However, the proposed task underlines the interactions between words and actions by emphasizing the pragmatic and context-dependent nature among them, and the relationship between them cannot be solely determined by lexical semantics.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "This section explains the proposed task and its corresponding dataset to alleviate the issues when splitting fine-grained, continuous word senses as assumed in previous WSD studies. The new task concentrates on the observable words and actions elicited. The task is implemented in the form like multiple-choice decision. A dataset with 439 items was also compiled to accompany the proposed task by 9 annotators. In each item, the question states a scenario, situation, or dialogue, in which a critical word is embedded. The critical words are polysemous single-character words selected from CWN.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Word Action Disambiguation Task and its Dataset", |
| "sec_num": "2" |
| }, |
| { |
| "text": "Resulting from the word's polysemy, four possible descriptions of actions are listed as options. An agent's (models or computer agents) task is to select the most proper action based on the understanding of the critical word's sense. The critical words are selected from Chinese Wordnet (CWN). Followed by rigorous lexical-semantic theories, CWN distinguishes fine-grained differences between word senses. In the WAD dataset, we selected 400 single-character verbs with more than 3 verbal senses. Among these senses, we defined 4 critical senses of each word where proper action would be impossible if the word senses are conflated. For example, \u53eb \"jiao4\" has 13 senses listed in CWN. In the sentence: \"\u9913\u4e86\u5c31\u53eb\u6c34\u9903\u4f86 \u5403 (Order some dumplings if you are hungry.)\", the sense of the critical word \u53eb (jiao4) refers to \"order something\". If the agent misunderstands it as calling someone over, \u6c34\u9903 (shui3jiao3) would be a human, not a kind of food, the resulting actions would be improper.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Word Action Disambiguation Task and its Dataset", |
| "sec_num": "2" |
| }, |
| { |
| "text": "A complete WAD task item is as follows. We first identify 4 critical word senses and created multiple-choice questions and options (the critical word is marked with angle brackets): The cargo must be heavy. No wonder the cargo ship cannot anchor here. The critical word, \u5403 (chi1), has 28 senses in CWN. The question states a scenario in a night market, using the sense of \u5403 (chi1) which refers to \"eat something\". Options that follow are 4 other possible responses based on other critical senses: (A) to eat something in; (B) to consume lots of resources; (C) to indicate that a card is captured by a cash machine, and (D) to displace the water while the boat is immersed in the sea. The correct answer to the question is option A.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Word Action Disambiguation Task and its Dataset", |
| "sec_num": "2" |
| }, |
| { |
| "text": "\u6211\u6628\u5929<\u5403>\u4e86\u516c\u9928\u591c\u5e02\u7684\u81ed\u8c46\u8150\uff0c\u771f\u68d2 I <had>", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Word Action Disambiguation Task and its Dataset", |
| "sec_num": "2" |
| }, |
| { |
| "text": "These 4 options refer to the respective sense by the frame semantics, pragmatics, context, or common-sense knowledge. Importantly, the options are designed not to relate to the question with lexical semantics alone. That is, the questions and options are designed so the mapping relations between words and actions cannot be easily learned by models based on current syntagmatic vector semantics.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Word Action Disambiguation Task and its Dataset", |
| "sec_num": "2" |
| }, |
| { |
| "text": "The proposed WAD dataset is aimed to be pragmatically, contextually, real-world relevant word action pairs, and these pairs cannot be determined by a model trained only on syntagmatic relations. Therefore, we verify the dataset with two approaches. (1) Item evaluation: we collect human raters' responses on these items and select the most appropriate items to include in the final dataset (Section 3). (2) Dataset Evaluation: we attempted a current deep learning model; the resulting performance is a tentative baseline on the proposed dataset (Section 4).", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Word Action Disambiguation Task and its Dataset", |
| "sec_num": "2" |
| }, |
| { |
| "text": "We evaluated items in the dataset with human ratings. Results of rating data were used to select the most appropriate items to include in the final dataset. We first describe methods of collecting rating data and item selection results (Section 3.2).", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Item Evaluation", |
| "sec_num": "3" |
| }, |
| { |
| "text": "Five Mandarin native speakers, aged from 19 to 24, were recruited in the rating study. After researchers gave instructions, raters were asked to evaluate how well each option matches the question stem. We used a 5-point scale Likert scale on each rating item: from definitely not the correct answer (point 1), not likely to be the correct answer (point 2), possibly incorrect or possibly correct (point 3), likely to be the correct answer (point 4), and definitely the correct answer (point 5). Each rater went through all 1756 question-option pairs. They responded with independent spreadsheets so that ratings data would not be seen by other raters.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Item Rating Study", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "There were 8,780 rating scores collected. The mean and the standard deviation of each questionoption pairing were shown in Figure 2 . The rating means of each pair are bimodally distributed, where modes occurred in point 1 and point 5. The pattern was expected as it indicated the raters tend to agree on which option should or should not be the appropriate choice. The fact that the frequency of ratings with higher scores (above 4) is lower than the frequency of ratings with lower scores (below 2) also aligns with this expectation since only a quarter of the options were designed to be the appropriate choices in the sense-action dataset.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 123, |
| "end": 131, |
| "text": "Figure 2", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Item Rating Study", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "The standard deviation of the ratings for each option signified inter-rater agreements. If raters did not agree on a pair, the rating scores would differ widely, resulting in a large standard deviation; on the contrary, if raters all agree on a pair and gave it the same scores, the standard deviation would be 0. As shown in Figure 2 , the distribution of the standard deviations is right-skewed, with most of the standard deviations (64%) having values below 1.0. This indicates a high agreement on the ratings among the raters.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 326, |
| "end": 334, |
| "text": "Figure 2", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Item Rating Study", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "We devised a two-phase selection scheme each employing a criterion to select appropriate items respectively: agreement criterion and contrast criterion. Two indices were calculated for each criterion: (1) Agreement between correct (as designated by the question authors) and maximally rated options and (2) the ratio between the highest and second-highest rating.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Item Selection", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "The agreement between correct and maximally rated options indicated the appropriateness of the answer created by the question authors. If the correct answer is rated lower than other options, the question was clearly not suitable in the dataset and therefore dropped. There were 10 questions omitted in this phase. This process filters out ten sense-action pairs and yields 429 remaining pairs (98%). In the second phase, we remove those pairs where the ratio between the highest and second highest is below 1.15. This index indicated the ambiguity of the correct answer among other candidate options. If the correct options were rated close to other options, the questions may involve complicated pragmatic or context considerations that cannot be resolved clearly even by human raters. There were further 10 items dropped in this phase. After two phases of item selection, there were 5% of dropped items and resulted in 419 items included in the final dataset 1 .", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Item Selection", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "WAD task involves learning the relations between words and actions, where pragmatic, semantic, and common-sense knowledge interact with each other. To evaluate the extent how current machine learning models perform on the WAD task, we compare two different models with two different feature representational approaches as baseline models of the dataset. Figure 2 : Distributions of the mean and standard deviation of the 5 raters' ratings for the options in the dataset", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 354, |
| "end": 362, |
| "text": "Figure 2", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Dataset Evaluation", |
| "sec_num": "4" |
| }, |
| { |
| "text": "Two feature representation approaches are explored in this study. The first approach takes advantage of recent development of contextualized embedding models, specifically BERT (Devlin et al., 2018) , to train a multiple-choice model on the proposed WAD task. Past studies showed that, as a transformer based model, a pre-trained BERT model is learned to represent lexical semantics of words and their syntactic relations within the sentences (Manning, Clark, Hewitt, Khandelwal, & Levy, 2020) . This approach models the syntagmatic aspects of the linguistic inputs.", |
| "cite_spans": [ |
| { |
| "start": 177, |
| "end": 198, |
| "text": "(Devlin et al., 2018)", |
| "ref_id": "BIBREF5" |
| }, |
| { |
| "start": 443, |
| "end": 493, |
| "text": "(Manning, Clark, Hewitt, Khandelwal, & Levy, 2020)", |
| "ref_id": "BIBREF9" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Feature Representation", |
| "sec_num": "4.1" |
| }, |
| { |
| "text": "However, WAD items are designed to involve more than words' syntagmatic behaviors. Therefore, we devise a second approach to represent the information in items, which is more aimed to capture the paradigmatic relations among the stem and options in an item. Lexical resources, such as ConceptNet (Speer, Chin, & Havasi, 2017) , is incorporated into the model through constructing a semantic graph. The graph has all the words in the dataset as nodes and relations (as defined in lexical resources) as edges. The resulting graph consists of 15,600 nodes and 807,426 edges. The graph contains 633 components (groups of nodes connected with each other), 608 of which are single node components. The largest component is composed of 12,469 nodes. An example of the semantic annotation on an item is shown in Figure 3 . The semantic graph is further encoded into vectors with node embeddings (Grover & Leskovec, 2016) . The hypothesis is that, equipped with paradigmatic and syntagmatic knowledge, the agent performs better in the WAD task.", |
| "cite_spans": [ |
| { |
| "start": 296, |
| "end": 325, |
| "text": "(Speer, Chin, & Havasi, 2017)", |
| "ref_id": "BIBREF15" |
| }, |
| { |
| "start": 887, |
| "end": 912, |
| "text": "(Grover & Leskovec, 2016)", |
| "ref_id": "BIBREF6" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 804, |
| "end": 812, |
| "text": "Figure 3", |
| "ref_id": "FIGREF1" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Feature Representation", |
| "sec_num": "4.1" |
| }, |
| { |
| "text": "The dataset is split into a training set and a validation set with 80% and 20% proportions, respectively. A training example is composed of each of the four options concatenated with the question stem, resulting in a vector of four vectors, each one representing a question-option pair. The model needs to learn the indices of the correct answers.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Model Results", |
| "sec_num": "4.2" |
| }, |
| { |
| "text": "Two models are trained and compared. The first model only uses BERT embedding as input, and a standard multiple-choice readout head, which is composed of a fully-connected layer of 768 hidden units, is stacked upon the output embeddings. The model finally predicts the index of the correct question-option pair. The second model's input includes BERT embeddings and node embeddings derived from the semantic graph. The input sequence of node embeddings is fed into a GRU layer, in which the hidden size is 100. The last hidden states of GRU are transformed with a fully-connected layer and concatenated with the BERT output as generated in the first model. We trained these two models on the WAD dataset with a batch size of 8, and the parameters are optimized with Adam optimizer with a learning rate of 5e-5 for 3 epochs.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Model Results", |
| "sec_num": "4.2" |
| }, |
| { |
| "text": "The first model, with BERT embeddings only, achieved 38.64% accuracy, which was above randomly choosing (25% in a 4-option multiple-choice problem). The model with BERT and semantic graph embeddings achieves better performance with an accuracy of 43.18%. The pattern suggests paradigmatic information is helpful in learning the WAD task.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Model Results", |
| "sec_num": "4.2" |
| }, |
| { |
| "text": "The current model with contextualized embeddings and semantic graph node embeddings can be considered as a tentative baseline performance for the WAD task. Distinctive from the traditional word sense disambiguation task, where word senses are mostly determined by its syntagmatic context, the WAD task deals further with pragmatics and real-world knowledge. These contextual knowledges are only implied in the text. The common-sense knowledge extracted from ConceptNet is a tentative approach that paves the way for a more comprehensive scheme. Such a scheme may involve annotating the common sense or real-world knowledge suggesting relations underlying the question stem and candidate options. Therefore, the connections between the question stem and correct options would be more accessible for a machine learner. The enumerative and discretization of word senses impose profound limitations, both theoretically and computationally, on fine-grained sense inventories. In addition, the relationship between WSD and NLU remains unclear. Even given the success of WSD/sense tagger, how can that WSD process logically entail the proper understanding of response in context? In this paper, we bring a 'meaning-in-action' philosophy into the WSD field. We identified the relations between words, senses, and actions and emphasize the observable pairs among them, i.e. word-action pairs. We then proposed a new task called \"word-action disambiguation\" (WAD), and its accompanying dataset which consisted of 419 multiple-choice questions. The task is designed to incorporate the semantic, pragmatic, real-world aspects of linguistic uses, and the relations between question and option pairs cannot be reduced to merely lexical semantics. We further evaluate each item with human rating data, to ensure the correctness and clearness of each item. A deep learning model, based on BERT, was trained on the WAD dataset to serve as a baseline performance. We expect the proposal of the WAD task and dataset would shed new light to the current architecture of WSD and motivate computational models to incorporate more complex aspects of human language.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Model Results", |
| "sec_num": "4.2" |
| }, |
| { |
| "text": "Dataset is available at https://github.com/lopentu/WAD", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| } |
| ], |
| "back_matter": [ |
| { |
| "text": "This work was supported by Ministry of Science and Technology (MOST), Taiwan. Grant Number MOST. 108-2634-F-001-006.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Acknowledgement", |
| "sec_num": null |
| } |
| ], |
| "bib_entries": { |
| "BIBREF0": { |
| "ref_id": "b0", |
| "title": "Enriching Word Vectors with Subword Information", |
| "authors": [ |
| { |
| "first": "P", |
| "middle": [], |
| "last": "Bojanowski", |
| "suffix": "" |
| }, |
| { |
| "first": "E", |
| "middle": [], |
| "last": "Grave", |
| "suffix": "" |
| }, |
| { |
| "first": "A", |
| "middle": [], |
| "last": "Joulin", |
| "suffix": "" |
| }, |
| { |
| "first": "T", |
| "middle": [], |
| "last": "Mikolov", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXiv:1607.04606" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Bojanowski, P., Grave, E, Joulin, A., & Mikolov, T. (2016). Enriching Word Vectors with Subword Infor- mation. Available on arXiv preprint arXiv:1607.04606.", |
| "links": null |
| }, |
| "BIBREF1": { |
| "ref_id": "b1", |
| "title": "A Lexical-semantic Analysis of Mandarin Chinese Verbs: Representation and Methodology", |
| "authors": [ |
| { |
| "first": "L", |
| "middle": [ |
| "L" |
| ], |
| "last": "Chang", |
| "suffix": "" |
| }, |
| { |
| "first": "K", |
| "middle": [ |
| "J" |
| ], |
| "last": "Chen", |
| "suffix": "" |
| }, |
| { |
| "first": "C", |
| "middle": [ |
| "R" |
| ], |
| "last": "Huang", |
| "suffix": "" |
| } |
| ], |
| "year": 2000, |
| "venue": "Computational Linguistics and Chinese Language Processing", |
| "volume": "5", |
| "issue": "", |
| "pages": "1--18", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Chang, L. L., Chen, K. J., & Huang, C. R. (2000). A Lexical-semantic Analysis of Mandarin Chinese Verbs: Representation and Methodology. Computa- tional Linguistics and Chinese Language Processing, 5, 1-18.", |
| "links": null |
| }, |
| "BIBREF2": { |
| "ref_id": "b2", |
| "title": "Optimizing Semantic Granularity for NLP-report on a Lexicographic Experiment", |
| "authors": [ |
| { |
| "first": "S", |
| "middle": [], |
| "last": "Cinkov\u00e1", |
| "suffix": "" |
| }, |
| { |
| "first": "Martin", |
| "middle": [], |
| "last": "Holub", |
| "suffix": "" |
| }, |
| { |
| "first": "Vincent", |
| "middle": [], |
| "last": "Kr\u00ed\u017e", |
| "suffix": "" |
| } |
| ], |
| "year": 2012, |
| "venue": "Proceedings of the 15th EURALEX International Congress", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Cinkov\u00e1, S., Martin Holub, Vincent Kr\u00ed\u017e (2012). Opti- mizing Semantic Granularity for NLP-report on a Lexicographic Experiment. In: Proceedings of the 15th EURALEX International Congress.", |
| "links": null |
| }, |
| "BIBREF3": { |
| "ref_id": "b3", |
| "title": "Pre-Training with Whole Word Masking for Chinese BERT", |
| "authors": [ |
| { |
| "first": "Y", |
| "middle": [], |
| "last": "Cui", |
| "suffix": "" |
| }, |
| { |
| "first": "W", |
| "middle": [], |
| "last": "Che", |
| "suffix": "" |
| }, |
| { |
| "first": "T", |
| "middle": [], |
| "last": "Liu", |
| "suffix": "" |
| }, |
| { |
| "first": "B", |
| "middle": [], |
| "last": "Qin", |
| "suffix": "" |
| }, |
| { |
| "first": "Z", |
| "middle": [], |
| "last": "Yang", |
| "suffix": "" |
| }, |
| { |
| "first": "S", |
| "middle": [], |
| "last": "Wang", |
| "suffix": "" |
| }, |
| { |
| "first": "G", |
| "middle": [], |
| "last": "Hu", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXiv:1906.08101" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Cui, Y., Che, W., Liu, T., Qin, B., Yang, Z., Wang, S., & Hu, G. (2019). Pre-Training with Whole Word Masking for Chinese BERT. arXiv preprint arXiv:1906.08101.", |
| "links": null |
| }, |
| "BIBREF4": { |
| "ref_id": "b4", |
| "title": "Essays on Actions and Events. Oxford", |
| "authors": [ |
| { |
| "first": "D", |
| "middle": [], |
| "last": "Davidson", |
| "suffix": "" |
| } |
| ], |
| "year": 1980, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Davidson, D. (1980). Essays on Actions and Events. Ox- ford: Oxford University Press.", |
| "links": null |
| }, |
| "BIBREF5": { |
| "ref_id": "b5", |
| "title": "Bert: Pre-training of Deep Bidirectional Transformers for Language Understanding", |
| "authors": [ |
| { |
| "first": "J", |
| "middle": [], |
| "last": "Devlin", |
| "suffix": "" |
| }, |
| { |
| "first": "M", |
| "middle": [ |
| "W" |
| ], |
| "last": "Chang", |
| "suffix": "" |
| }, |
| { |
| "first": "K", |
| "middle": [], |
| "last": "Lee", |
| "suffix": "" |
| }, |
| { |
| "first": "K", |
| "middle": [], |
| "last": "Toutanova", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXiv:1810.04805" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Devlin, J., Chang, M. W., Lee, K., & Toutanova, K. (2018). Bert: Pre-training of Deep Bidirectional Transformers for Language Understanding. arXiv pre- print arXiv:1810.04805.", |
| "links": null |
| }, |
| "BIBREF6": { |
| "ref_id": "b6", |
| "title": "node2vec: Scalable Feature Learning for Networks. ACM SIGKDD International Conference on Knowledge Discovery and Data Mining (KDD)", |
| "authors": [ |
| { |
| "first": "A", |
| "middle": [], |
| "last": "Grover", |
| "suffix": "" |
| }, |
| { |
| "first": "J", |
| "middle": [], |
| "last": "Leskovec", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Grover, A. & Leskovec, J. (2016). node2vec: Scalable Feature Learning for Networks. ACM SIGKDD Inter- national Conference on Knowledge Discovery and Data Mining (KDD).", |
| "links": null |
| }, |
| "BIBREF7": { |
| "ref_id": "b7", |
| "title": "OntoNotes: The 90% Solution", |
| "authors": [ |
| { |
| "first": "E", |
| "middle": [], |
| "last": "Hovy", |
| "suffix": "" |
| }, |
| { |
| "first": "M", |
| "middle": [], |
| "last": "Mitchell", |
| "suffix": "" |
| }, |
| { |
| "first": "M", |
| "middle": [], |
| "last": "Palmer", |
| "suffix": "" |
| }, |
| { |
| "first": "L", |
| "middle": [], |
| "last": "Ramshaw", |
| "suffix": "" |
| }, |
| { |
| "first": "R", |
| "middle": [], |
| "last": "Weischedel", |
| "suffix": "" |
| } |
| ], |
| "year": 2006, |
| "venue": "Proceedings of the Human Language Technology Conference of the NAACL, Companion Volume: Short Papers, NAACL-Short '06", |
| "volume": "", |
| "issue": "", |
| "pages": "57--60", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Hovy, E., M. Mitchell, M. Palmer, L. Ramshaw, and R. Weischedel (2006). 'OntoNotes: The 90% Solution'. In: Proceedings of the Human Language Technology Conference of the NAACL, Companion Volume: Short Papers, NAACL-Short '06. Stroudsburg, PA, USA: Association for Computational Linguistics, 57- 60.", |
| "links": null |
| }, |
| "BIBREF8": { |
| "ref_id": "b8", |
| "title": "I Don\u02bct Believe in Word Senses", |
| "authors": [ |
| { |
| "first": "A", |
| "middle": [], |
| "last": "Kilgarriff", |
| "suffix": "" |
| } |
| ], |
| "year": 1997, |
| "venue": "Computers and the Humanities", |
| "volume": "31", |
| "issue": "", |
| "pages": "91--113", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Kilgarriff, A. (1997). 'I Don\u02bct Believe in Word Senses.' Computers and the Humanities 31: 91-113.", |
| "links": null |
| }, |
| "BIBREF9": { |
| "ref_id": "b9", |
| "title": "Emergent linguistic structure in artificial neural networks trained by self-supervision", |
| "authors": [ |
| { |
| "first": "C", |
| "middle": [ |
| "D" |
| ], |
| "last": "Manning", |
| "suffix": "" |
| }, |
| { |
| "first": "K", |
| "middle": [], |
| "last": "Clark", |
| "suffix": "" |
| }, |
| { |
| "first": "J", |
| "middle": [], |
| "last": "Hewitt", |
| "suffix": "" |
| }, |
| { |
| "first": "U", |
| "middle": [], |
| "last": "Khandelwal", |
| "suffix": "" |
| }, |
| { |
| "first": "O", |
| "middle": [], |
| "last": "Levy", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "Proceedings of the National Academy of Sciences", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Manning, C. D., Clark, K., Hewitt, J., Khandelwal, U., & Levy, O. (2020). Emergent linguistic structure in artificial neural networks trained by self-supervision. Proceedings of the National Academy of Sciences.", |
| "links": null |
| }, |
| "BIBREF10": { |
| "ref_id": "b10", |
| "title": "Word Sense Disambiguation: A Survey", |
| "authors": [ |
| { |
| "first": "R", |
| "middle": [], |
| "last": "Navigli", |
| "suffix": "" |
| } |
| ], |
| "year": 2009, |
| "venue": "ACM Computing Surveys", |
| "volume": "41", |
| "issue": "2", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Navigli, R. (2009). Word Sense Disambiguation: A Sur- vey. ACM Computing Surveys, Vol. 41, No. 2.", |
| "links": null |
| }, |
| "BIBREF11": { |
| "ref_id": "b11", |
| "title": "Making fine-grained and coarse-grained sense distinctions, both manually and automatically", |
| "authors": [ |
| { |
| "first": "Martha", |
| "middle": [], |
| "last": "Palmer", |
| "suffix": "" |
| }, |
| { |
| "first": "Hoa", |
| "middle": [ |
| "&" |
| ], |
| "last": "Dang", |
| "suffix": "" |
| }, |
| { |
| "first": "Christiane", |
| "middle": [], |
| "last": "Fellbaum", |
| "suffix": "" |
| } |
| ], |
| "year": 2007, |
| "venue": "Natural Language Engineering", |
| "volume": "", |
| "issue": "13", |
| "pages": "137--163", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Palmer, Martha, Dang, Hoa & Fellbaum, Christiane. (2007). Making fine-grained and coarse-grained sense distinctions, both manually and automatically. Natu- ral Language Engineering (13): 137-163.", |
| "links": null |
| }, |
| "BIBREF12": { |
| "ref_id": "b12", |
| "title": "The Generative Lexicon", |
| "authors": [ |
| { |
| "first": "J", |
| "middle": [], |
| "last": "Pustejovsky", |
| "suffix": "" |
| } |
| ], |
| "year": 1995, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Pustejovsky, J. (1995). The Generative Lexicon. MIT press.", |
| "links": null |
| }, |
| "BIBREF13": { |
| "ref_id": "b13", |
| "title": "Speech Acts and Illocutionary Logic", |
| "authors": [ |
| { |
| "first": "J", |
| "middle": [ |
| "R" |
| ], |
| "last": "Searle", |
| "suffix": "" |
| }, |
| { |
| "first": "D", |
| "middle": [], |
| "last": "Vanderveken", |
| "suffix": "" |
| } |
| ], |
| "year": 1985, |
| "venue": "Logic, Thought and Action. Logic, Epistemology, and the Unity of Science", |
| "volume": "2", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Searle, J.R., Vanderveken D. (1985). Speech Acts and Illocutionary Logic. In: Vanderveken D. (eds) Logic, Thought and Action. Logic, Epistemology, and the Unity of Science, vol 2. Springer, Dordrecht.", |
| "links": null |
| }, |
| "BIBREF14": { |
| "ref_id": "b14", |
| "title": "Collective Intentions and Actions", |
| "authors": [ |
| { |
| "first": "J", |
| "middle": [ |
| "R" |
| ], |
| "last": "Searle", |
| "suffix": "" |
| } |
| ], |
| "year": 1990, |
| "venue": "Intentions in Communication", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Searle, J.R. (1990). Collective Intentions and Actions. In P. Cohen, J. Morgan, and M. Pollak (eds.). Inten- tions in Communication, Cambridge, MA: MIT Press.", |
| "links": null |
| }, |
| "BIBREF15": { |
| "ref_id": "b15", |
| "title": "ConceptNet 5.5: An Open Multilingual Graph of General Knowledge", |
| "authors": [ |
| { |
| "first": "R", |
| "middle": [], |
| "last": "Speer", |
| "suffix": "" |
| }, |
| { |
| "first": "J", |
| "middle": [], |
| "last": "Chin", |
| "suffix": "" |
| }, |
| { |
| "first": "C", |
| "middle": [], |
| "last": "Havasi", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "Proceedings of AAAI 31", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Speer, R., Chin, J., & Havasi, C. (2017). ConceptNet 5.5: An Open Multilingual Graph of General Knowledge. In Proceedings of AAAI 31.", |
| "links": null |
| }, |
| "BIBREF16": { |
| "ref_id": "b16", |
| "title": "A Game-theoretic Approach to Word Sense Disambiguation", |
| "authors": [ |
| { |
| "first": "R", |
| "middle": [], |
| "last": "Tripodi", |
| "suffix": "" |
| }, |
| { |
| "first": "M", |
| "middle": [], |
| "last": "Pelillo", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "Computational Linguistics", |
| "volume": "43", |
| "issue": "1", |
| "pages": "31--70", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Tripodi, R. & Pelillo, M. (2017). A Game-theoretic Ap- proach to Word Sense Disambiguation. Computa- tional Linguistics, 43(1):31-70.", |
| "links": null |
| }, |
| "BIBREF17": { |
| "ref_id": "b17", |
| "title": "OntoNotes Release 4.0. Philadelphia: Linguistic Data Consortium", |
| "authors": [ |
| { |
| "first": "R", |
| "middle": [], |
| "last": "Weischedel", |
| "suffix": "" |
| } |
| ], |
| "year": 2011, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Weischedel, R., et al. (2011). OntoNotes Release 4.0. Philadelphia: Linguistic Data Consortium.", |
| "links": null |
| } |
| }, |
| "ref_entries": { |
| "FIGREF0": { |
| "num": null, |
| "type_str": "figure", |
| "uris": null, |
| "text": "A schematic explanation of the relationships between words, word senses, and proposed actions." |
| }, |
| "FIGREF1": { |
| "num": null, |
| "type_str": "figure", |
| "uris": null, |
| "text": "An example of an annotated item. The question stem is in the middle, surrounded by the four candidate options. The links among them are the semantic relations." |
| }, |
| "TABREF0": { |
| "html": null, |
| "text": "stinky tofu in Gongguan Night Market yesterday. That was great! A. \u96e3\u602a\u5047\u65e5\u7684\u6642\u5019\u4eba\u6f6e\u90fd\u5f88\u591a No wonder it is so crowded on weekends. B. \u505a\u9019\u4e8b\u771f\u8017\u9ad4\u529b\uff0c\u4e0d\u5212\u7b97 It is not worthy of doing such labor-consuming work. C. \u9019\u6a5f\u5668\u592a\u721b\u4e86\u5427\uff0c\u5361\u63d2\u9032\u53bb\u5c31\u62d4\u4e0d\u51fa\u4f86 This machine sucks. You can't get the card back after you insert it.", |
| "num": null, |
| "type_str": "table", |
| "content": "<table/>" |
| } |
| } |
| } |
| } |