| { |
| "paper_id": "W97-0210", |
| "header": { |
| "generated_with": "S2ORC 1.0.0", |
| "date_generated": "2023-01-19T04:36:50.273236Z" |
| }, |
| "title": "Investigating Complementary Methods for Verb Sense Pruning", |
| "authors": [ |
| { |
| "first": "Hongyan", |
| "middle": [], |
| "last": "Jing", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "Columbia University New York", |
| "location": { |
| "postCode": "10027", |
| "region": "N.Y" |
| } |
| }, |
| "email": "hjing@edu" |
| }, |
| { |
| "first": "Vasileios", |
| "middle": [], |
| "last": "Hatzivassiloglou", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "Columbia University New York", |
| "location": { |
| "postCode": "10027", |
| "region": "N.Y" |
| } |
| }, |
| "email": "" |
| }, |
| { |
| "first": "Rebecca", |
| "middle": [], |
| "last": "Passonneau", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "Columbia University New York", |
| "location": { |
| "postCode": "10027", |
| "region": "N.Y" |
| } |
| }, |
| "email": "" |
| }, |
| { |
| "first": "Kathleen", |
| "middle": [], |
| "last": "Mckeown", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "Columbia University New York", |
| "location": { |
| "postCode": "10027", |
| "region": "N.Y" |
| } |
| }, |
| "email": "" |
| } |
| ], |
| "year": "", |
| "venue": null, |
| "identifiers": {}, |
| "abstract": "We present an approach for tagging verb sense that combines a domain-independent method based on subcategorization and alternations with a domain-dependent method utilizing statistically extracted verb clusters. Initial results indicate that verb senses can be pruned for highly polysemous verbs by up to 74% by the first method and by up to 85% by the second method.", |
| "pdf_parse": { |
| "paper_id": "W97-0210", |
| "_pdf_hash": "", |
| "abstract": [ |
| { |
| "text": "We present an approach for tagging verb sense that combines a domain-independent method based on subcategorization and alternations with a domain-dependent method utilizing statistically extracted verb clusters. Initial results indicate that verb senses can be pruned for highly polysemous verbs by up to 74% by the first method and by up to 85% by the second method.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Abstract", |
| "sec_num": null |
| } |
| ], |
| "body_text": [ |
| { |
| "text": "Much work in natural language processing is predicated on the notion that linguistic usage varies sufficiently across different situations of language use that systems can be tailored to a particular sublanguage variety (Kittredge and Lehrberger, 1982) . Biber (1993) presents evidence that a corpus restricted to one or two language registers would exclude \"much of the English language\" by narrowing the lexicon, verb tense and aspect, and syntactic complexity. Such observations inform the increasing trend towards analysis of homogeneous corpora to identify linguistic constraints for use in systems intended to understand or generate coherent discourse. Recent work in this vein includes identification of lexical constraints from textual tutorial dialogue (Moser and Moore, 1995) , constraints on illocutionary act type from spoken task-oriented dialogue (Allen et al., 1995) , prosodic constraints from spoken information-seeking monologues (Hirschberg and Nakatani, 1996) , and constraints on referring expressions from spoken narrative monologue (Passonneau, 1996) . Related work suggests that constraints of different types are interdependent (Biber, 1993; Passonneau and Litman, forthcoming) , hence should be investigated together. Our ultimate goal is to develop methods to tag lexical semantic features in discourse corpora in order to enhance extraction of constraints of the sort just listed. Two types of investigations that would undoubtedly be enhanced are explorations of the interrelation of lexical cohesion and global discourse structure (Morris and Hirst, 1991; Hearst, 1994) , and identification of lexicaliza-: tion patterns for domain-specific concepts (Robin, 1994) .", |
| "cite_spans": [ |
| { |
| "start": 220, |
| "end": 252, |
| "text": "(Kittredge and Lehrberger, 1982)", |
| "ref_id": "BIBREF11" |
| }, |
| { |
| "start": 255, |
| "end": 267, |
| "text": "Biber (1993)", |
| "ref_id": "BIBREF1" |
| }, |
| { |
| "start": 762, |
| "end": 785, |
| "text": "(Moser and Moore, 1995)", |
| "ref_id": "BIBREF21" |
| }, |
| { |
| "start": 861, |
| "end": 881, |
| "text": "(Allen et al., 1995)", |
| "ref_id": "BIBREF0" |
| }, |
| { |
| "start": 948, |
| "end": 979, |
| "text": "(Hirschberg and Nakatani, 1996)", |
| "ref_id": "BIBREF9" |
| }, |
| { |
| "start": 1055, |
| "end": 1073, |
| "text": "(Passonneau, 1996)", |
| "ref_id": "BIBREF24" |
| }, |
| { |
| "start": 1153, |
| "end": 1166, |
| "text": "(Biber, 1993;", |
| "ref_id": "BIBREF1" |
| }, |
| { |
| "start": 1167, |
| "end": 1202, |
| "text": "Passonneau and Litman, forthcoming)", |
| "ref_id": null |
| }, |
| { |
| "start": 1561, |
| "end": 1585, |
| "text": "(Morris and Hirst, 1991;", |
| "ref_id": "BIBREF20" |
| }, |
| { |
| "start": 1586, |
| "end": 1599, |
| "text": "Hearst, 1994)", |
| "ref_id": "BIBREF8" |
| }, |
| { |
| "start": 1680, |
| "end": 1693, |
| "text": "(Robin, 1994)", |
| "ref_id": "BIBREF29" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "In this paper, we propose a two-pronged approach to an initial step in lexical semantic tagging, pruning the search space for polysemous verbs. Rather than attempting to identify unique word senses, we aim for the more realistic goal of pruning sense information. We will then incrementally evaluate the utility of tagging corpora with pruned sense sets for different types of discourse. We begin with verbs on the hypothesis that verb sense distinctions correlate with syntactic properties of verbs (Levin, 1993) . Our initial results indicate that domain-independent syntactic information reduces potential verb senses for multiply polysemous verbs (five or more WordNet senses) by more than 50%. In Section 2, we outline our first method, based on domain-independent lexical knowledge, presenting results from an analysis of thousands of verbs. In the section following that, we present our complementary method, a technique utilizing verb clusters automatically computed from corpus data. In the conclusion, we discuss how the combination of the two methods increases the performance of our system and enhances the robustness of the final results.", |
| "cite_spans": [ |
| { |
| "start": 500, |
| "end": 513, |
| "text": "(Levin, 1993)", |
| "ref_id": "BIBREF14" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "A given word may have n distinct senses and appear within m different syntactic contexts, but typically, not all n x m combinations are valid. The syntactic context can partly disambiguate the semantic content. For example, when the verb question has a that-clause complement, it cannot have the sense of \"ask\", but rather must have the sense of \"challenge\". To identify such interacting syntactic and semantic constraints at the lexical level, we utilize three knowledge bases for verbs: * The COMLEX database (Grishman et al., 1994; Macleod and Grishman, 1995) , which includes detailed subcategorization information for each verb, and some adjectives and nouns. (Levin, 1993) . Alternations include syntactic transformations such as thereinsertion (e.g., A ship appeared on the horizon ---, There appeared a ship on the horizon) and locative-inversion (e.g., --* On the horizon there appeared a ship). Much in the same way as subcategorization frames, alternations are constrained by the sense of the word; for example, the verb appear allows there-insertion and locative-inversion in its senses of \"come into being\" or \"become visible\", but not in its senses of \"come out\" or \"participate in a play\". \u2022 WordNet's (Miller et al., 1990) hierarchical semantic classification. WordNet supplies links between semantically related senses as encoded in synonym sets (synsets). Though many words are polysemous, Miller et al. (1990) argue that a set of synonymous or nearly synonymous words can serve to identify the single lexical concept they have in common. It also supplies limited subcategorization information, in the form of allowed sentential frames (\"verb frames\") for each sense.", |
| "cite_spans": [ |
| { |
| "start": 511, |
| "end": 534, |
| "text": "(Grishman et al., 1994;", |
| "ref_id": "BIBREF4" |
| }, |
| { |
| "start": 535, |
| "end": 562, |
| "text": "Macleod and Grishman, 1995)", |
| "ref_id": null |
| }, |
| { |
| "start": 665, |
| "end": 678, |
| "text": "(Levin, 1993)", |
| "ref_id": "BIBREF14" |
| }, |
| { |
| "start": 1217, |
| "end": 1238, |
| "text": "(Miller et al., 1990)", |
| "ref_id": "BIBREF18" |
| }, |
| { |
| "start": 1408, |
| "end": 1428, |
| "text": "Miller et al. (1990)", |
| "ref_id": "BIBREF18" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Exploiting domain-independent syntactic clues", |
| "sec_num": "2" |
| }, |
| { |
| "text": "WordNet contains the needed information on permissible combinations of syntactic context and semantic content, but its subcategorization information is limited. Thirty-five different subcategorization frames are used for all verbs in WordNet, and the frames supplied are partial. COMLEX provides more detailed specifications of the syntactic frames for each verb (92 distinct subcategorization types). The allowed alternations (which we encoded in machine-readable form from the detailed rules supplied in (Levin, 1993) ) provide additional constraints. Mapping the more precise syntactic information in COMLEX to the verb frames of WordNet allows the construction of a more detailed syntactic entry for each word sense, and enables the association of alternation constraints with the senses in WordNet. In the future, it will also allow us to use corpora tagged with COMLEX subcategorization frames, e.g., (Macleod et al., 1996) .", |
| "cite_spans": [ |
| { |
| "start": 506, |
| "end": 519, |
| "text": "(Levin, 1993)", |
| "ref_id": "BIBREF14" |
| }, |
| { |
| "start": 907, |
| "end": 929, |
| "text": "(Macleod et al., 1996)", |
| "ref_id": "BIBREF16" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Exploiting domain-independent syntactic clues", |
| "sec_num": "2" |
| }, |
| { |
| "text": "We have manually constructed a table that maps WordNet syntactic constraints to the ones used in COMLEX (and vice versa) and another that maps allowed alternations from (Levin, 1993) to COM-LEX or WordNet syntactic frames. A program consults the three databases and the mapping tables and, for each word occurrence constructs a list of the senses that are compatible with the syntactic constraints. During this process, a detailed entry for the word is formed, containing both syntactic and semantic information. The resulting entries comprise a rich lexical resource that we plan to use for text generation and other applications (Jing et al., 1997) .", |
| "cite_spans": [ |
| { |
| "start": 169, |
| "end": 182, |
| "text": "(Levin, 1993)", |
| "ref_id": "BIBREF14" |
| }, |
| { |
| "start": 631, |
| "end": 650, |
| "text": "(Jing et al., 1997)", |
| "ref_id": "BIBREF10" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Exploiting domain-independent syntactic clues", |
| "sec_num": "2" |
| }, |
| { |
| "text": "For a specific example, consider the verb appear. The pertinent information in the three databases for this word is listed in parts (a)-(c) of Figure 1 . For 59 (VERB :ORTH \"appear\" :SUBC ((PP-TO-INF-RS :PVAL (\"to\")) (PP-PRED-RS :PVAL (\"to\" \"of\" \"under\" \"against\" \"in favor Of\" \"before .... at\") ) ", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 143, |
| "end": 151, |
| "text": "Figure 1", |
| "ref_id": "FIGREF1" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Exploiting domain-independent syntactic clues", |
| "sec_num": "2" |
| }, |
| { |
| "text": "EQUATION", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [ |
| { |
| "start": 0, |
| "end": 8, |
| "text": "EQUATION", |
| "ref_id": "EQREF", |
| "raw_str": "(EXTI~P-TO-NP-S) (INTRAmS) (SEEK-S) (SEEN-T0-NP-S) (TO-INF-RS) (NP-PRF.D-RS) (ADJP-PRKI)-RS) (ADVP-PRm-RS) (AS-NP)))", |
| "eq_num": "(" |
| } |
| ], |
| "section": "Exploiting domain-independent syntactic clues", |
| "sec_num": "2" |
| }, |
| { |
| "text": "\u2022 > Something s Adjective/Noun \u2022 > Somebody. s Adjective (c) WordNet sense-syntax constraints for appear example, one of the subcategorization frames of appear in part (a), aDJP-PRKD-R$, indicates a predicate adjective with subject raising, as in He appeared confused. Part (b) of Figure 1 lists no alternations that are applicable to this subcategorization frame, while part (c) shows only two Word-Net synsets where appear takes an adjectival complement, senses $1 and $8. The complex entry of Figure 2 is produced automatically from these three types of lexical information. The resulting syntaxsemantics restriction matrix for appear is shown in Table 1 . When appear is encountered in a particular syntactic structure, the program consults the ( appear ((I ((PP-T0-Ir~-RS :PVAL (\"to\") :", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 281, |
| "end": 289, |
| "text": "Figure 1", |
| "ref_id": "FIGREF1" |
| }, |
| { |
| "start": 496, |
| "end": 504, |
| "text": "Figure 2", |
| "ref_id": "FIGREF2" |
| }, |
| { |
| "start": 650, |
| "end": 657, |
| "text": "Table 1", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Sense 8 (have an outward expression)", |
| "sec_num": null |
| }, |
| { |
| "text": "SO ((sb, --))) (T0,IIIF-RS :SO ((sb, --))) (NP-PRED-RS :SO ((sb, --) (sth, --))) (ADJP-PRED-RS :SO ((sb, --) (sth, --))))) (ADVP-PRED-RS :SO ((sb, --) (sth, --)))))", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Sense 8 (have an outward expression)", |
| "sec_num": null |
| }, |
| { |
| "text": "(2 ((PP-T0-INF-RS :PVAL (\"to\") :SO ((sb, --) (sth, --))) (PP-PRED-RS :PVAL (\"to\" \"of\" \"under\" \"agaSnst\" \"in favor of\" \"before\" \"at\") :", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Sense 8 (have an outward expression)", |
| "sec_num": null |
| }, |
| { |
| "text": "SO ((sb,--) (sth, --))) (INTRANS :SO ((sb, --) (sth, -))) (AS-~P :so ((sb, -) (sth, -))) (LOCPP :SO ((sb, --) (sth, --))) (INTRANS THERE-V-SUBJ :ALT there-insertion :SO ((sb, --) (sth, -))) (LOCPP LOCPP-V-SUBJ :ALT locative-inversion :SO ((sb, --) (sth, --)))))", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Sense 8 (have an outward expression)", |
| "sec_num": null |
| }, |
| { |
| "text": "CS ( restriction matrix to eliminate senses that can be excluded. In the case of appear, only 47 cells of the 8 x 23 matrix represent possible combinations of syntactic patterns with senses, corresponding to a 74.5% reduction in ambiguity.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Sense 8 (have an outward expression)", |
| "sec_num": null |
| }, |
| { |
| "text": "Due to incompatibilities between the COMLEX and WordNet representations of syntactic information, and the differences in coverage, the process of linking the information sources can in some cases \u2022 result in relatively underspecified rows of a restriction matrix, or to spurious cells. For example, the frame ADVP-PRED-RS in Table I occurs in COMLEX but does not correspond to any of the more general frames mentioned in WordNet. Rather than having no appropriate senses for this syntactic pattern, we map it to WordNet's verb frames \"Something s Adjective/Noun\" and \"Somebody s Adjective\" by analyzing experiment results regressively.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 325, |
| "end": 332, |
| "text": "Table I", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Sense 8 (have an outward expression)", |
| "sec_num": null |
| }, |
| { |
| "text": "On the other hand, the entry for $2 in the PP-TO-IIIF-RS frame for appear represents a spurious entry: appear does not occur in the $2 meaning of \"become visible\" with a to-prepositional phrase and a subject-controlled infinitive. In a sentence with this syntactic structure, such as '~fhe river appeared to the residents to be rising too rapidly\", appear can take only senses $1 and $6 for animate subjects and senses $3 and $7 for inanimate subjects. Yet the cell for $2 x PP-T0-IIIF-RS is generated in our matrix because of the overly general specification of verb frames in WordNet. We have chosen to risk overgeneration in these cases at present, rather than accidentally eliminating a valid sense. Eliminating spurious cells by hand would be time-consuming and error-prone, but the automatic classification method we report in the next section may help prune them. Also, as reported elsewhere (Jing et al., 1997) , we are extending our lexical resource with annotations of frequency information for each sense-subcategorization pair, derived from sense-tagged corpus data. As data is accumulated, zero frequency could be taken to represent less valid usages.", |
| "cite_spans": [ |
| { |
| "start": 899, |
| "end": 918, |
| "text": "(Jing et al., 1997)", |
| "ref_id": "BIBREF10" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Sense 8 (have an outward expression)", |
| "sec_num": null |
| }, |
| { |
| "text": "We have performed preliminary evaluation tests of our method for tagging verb occurrences with pruned word sense tags using the Brown corpus. The first step of the method is to identify the subcategorization pattern for a specific verb token. Here we rely on heuristics to identify the major constituents to the left and right of a verb token, as described in (Jing et al., 1997) . After hypothesizing the subcategorization pattern for a specific verb token, we use our sense restriction matrices (as in Table 1) to tag the verb token with a pruned set of senses. We evaluate the resulting sense tag against the version of the Brown corpus that has been hand-tagged with WordNet senses (Miller et al., 1993) . For appear, which we use as an example throughout this paper, we find 100 tokens in the Brown corpus. Of these, 46 are intransitive or have a locative prepositional phrase complement. Our method tags each of these tokens with two or three possible senses, and in all but one case, the sense tag includes the valid sense. Another 31 tokens are followed by to and a subject-controlled infinitive. In all these cases, our method makes a single, correct prediction out of the eight possible senses. For all 100 uses of appear in the corpus, the average number of possible senses predicted by our method is 1.99. We find a 75-76% reduction of possible senses (depending on whether we use the additional something~somebody selectional constraints), with only 2-3% of the tags being incorrect.", |
| "cite_spans": [ |
| { |
| "start": 360, |
| "end": 379, |
| "text": "(Jing et al., 1997)", |
| "ref_id": "BIBREF10" |
| }, |
| { |
| "start": 686, |
| "end": 707, |
| "text": "(Miller et al., 1993)", |
| "ref_id": "BIBREF19" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 504, |
| "end": 512, |
| "text": "Table 1)", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Sense 8 (have an outward expression)", |
| "sec_num": null |
| }, |
| { |
| "text": "For the 5,676 verbs present in all three databases, the average reduction in ambiguity was 36.82% for words with two to four senses, 59.36% for words with five to ten senses, and 73.86% for words with more than ten senses; the overall average for all polysemous words was 47.91%. rather, the presence of a bar for a category corresponding to more than 10 senses indicates that at least one verb falls in that category.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Sense 8 (have an outward expression)", |
| "sec_num": null |
| }, |
| { |
| "text": "in our databases. The most polysemous verb in our databases, run, is identified as having 41 senses. About half the verbs have more than one sense, and 20% have more than two. Our method performs better on the more polysemous words, which are the most difficult to prune. This increased difficulty applies even to statistical methods because of the large number of alternatives and the likely closeness in meaning among them. Selecting a subset of almost synonymous verb senses is significantly harder than, for example, disambiguating bank between the \"edge of river\" and \"financial institution\" senses.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Sense 8 (have an outward expression)", |
| "sec_num": null |
| }, |
| { |
| "text": "Using domain-dependent semantic classifications to identify predominant senses", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "3", |
| "sec_num": null |
| }, |
| { |
| "text": "The process outlined above has two significant advantages: first, it can be automatically applied, assuming a robust method for parsing the relevant verb phrase context (the experiments presented in (Pustejovsky et al., 1993) depend on the same type of information). Second, it reduces the ambiguity of a given word without sacrificing accuracy, insofar as the three input knowledge sources are accurate. To further restrict the size of the set of valid senses produced, we are currently exploring domaindependent, automatically constructed semantic classifications. Semantic classification programs (Brown et al., 1992; Hatzivassiloglou and McKeown, 1993; Pereira et al., 1993) use statistical information based on cooccurrence with appropriate marker words to partition a set of words into semantic groups or classes.", |
| "cite_spans": [ |
| { |
| "start": 199, |
| "end": 225, |
| "text": "(Pustejovsky et al., 1993)", |
| "ref_id": "BIBREF27" |
| }, |
| { |
| "start": 600, |
| "end": 620, |
| "text": "(Brown et al., 1992;", |
| "ref_id": "BIBREF2" |
| }, |
| { |
| "start": 621, |
| "end": 656, |
| "text": "Hatzivassiloglou and McKeown, 1993;", |
| "ref_id": "BIBREF5" |
| }, |
| { |
| "start": 657, |
| "end": 678, |
| "text": "Pereira et al., 1993)", |
| "ref_id": "BIBREF25" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "3", |
| "sec_num": null |
| }, |
| { |
| "text": "For example, using head nouns that occur with premodifying adjectives as one type of marker word, the adjective set {blue, cold, green, hot, red} can be partitioned into the subsets (l~r, ical fields (Lehrer, 1974) ) {blue, green, red} and .{cold, hot}. Automatic classification programs can achieve high performance, near that of humans on the same task, when supplied with enough data and with appropriate syntactic constraints (see (Hatzivassiloglou, 1996) for a detailed evaluation). However, given that each word must be assigned to one class independently of context, 1 the problem of ambiguity is \"solved\" by placing each word in the class where it fits best; that is, in the class dictated by the predominant sense of the word in the training text.", |
| "cite_spans": [ |
| { |
| "start": 200, |
| "end": 214, |
| "text": "(Lehrer, 1974)", |
| "ref_id": "BIBREF13" |
| }, |
| { |
| "start": 435, |
| "end": 459, |
| "text": "(Hatzivassiloglou, 1996)", |
| "ref_id": "BIBREF6" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "3", |
| "sec_num": null |
| }, |
| { |
| "text": "While this might be a limitation of partitioning methods for lexicographical purposes, it offers an advantage for our task. By an indirect route, it allows the automatic identification of the predominant sense of a word in a given text or subject topic. It is indirect because the actual result is groups of word forms, but we presume each group to represent a relatively homogeneous semantic class. Thus we presume that the relevant sense of a given word form in a group is in the same lexical field as the senses of the other word forms in the same group. The process is highly domain-dependent, i.e., the same set of words will be partitioned in different ways when the domain changes. For example, when our word grouping system (Hatzivassiloglou and McKeown, 1993) classified about 280 frequent adjectives in stock market reports, it formed, among others, the cluster {common, preferred}. This cluster would look odd were not the domain considered. ~ This information on predominant senses for each word form in a given corpus can be computed automatically, but remains implicit. To map the results onto word sense associations, and thus explicitly identify the predominant senses, we utilize the links between senses provided by WordNet. We note that while words like question and ask are ultimately connected in WordNet, the actual connections are only between some of the senses of the two words.", |
| "cite_spans": [ |
| { |
| "start": 732, |
| "end": 768, |
| "text": "(Hatzivassiloglou and McKeown, 1993)", |
| "ref_id": "BIBREF5" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "3", |
| "sec_num": null |
| }, |
| { |
| "text": "Similarly, the words question and dispute are also connected, but through a different subset of senses. Thus, if the automatically induced semantic classification indicates that the predominant sense of question is associated with dispute rather than with ask (by placing question and dispute but not ask in the same group), we can infer which of the WordNet senses of question is the predominant one in this domain. The algorithm involves the following steps: aSome systems produce \"soft s clusters, where words can belong into more than one group. These can be converted to non-overlapping groups for the purposes of this discussion by assigning each word to the group for which it has the highest membership coefficient.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "3", |
| "sec_num": null |
| }, |
| { |
| "text": "2In this domain, the two adjectives are complementaries, describing the two types of issued stock shares.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "3", |
| "sec_num": null |
| }, |
| { |
| "text": "\u2022 Construct the domain-dependent word classification. \u2022 For each word z, let Y -{YI,Y2,...} be the set of other words placed in the same semantic group with z. * For each I~ 6 Y, traverse the WordNet hierarchy and locate the (set of) senses of z, Si, that are connected with some sense of ~. The distance and the types of links that can be traversed while still considering two senses \"related\" can be heuristically determined; alternatively, we can use a measure of semantic distance such as those proposed in (Resnik, 1995) or . \u2022 Finally, the union of the sets S~ contains the predominant sense of x. While in the general case it is possible to have multiple links between word forms (corresponding to different sense pairings), typically each Si will contain only one sense, and their union will contain a few elements. This set ~ can be further reduced, e.g., by giving more weight to senses supported by more than one of the ~'s or by unambiguous Y~'s.", |
| "cite_spans": [ |
| { |
| "start": 511, |
| "end": 525, |
| "text": "(Resnik, 1995)", |
| "ref_id": "BIBREF28" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "3", |
| "sec_num": null |
| }, |
| { |
| "text": "For a concrete example, consider the verb question, which can have, among others, the senses of dispute (sense 1 in WordNet) or inquire (sense 3 in WordNet). If we consider a sense as linked with one of the senses of question if it is in the maximal subtree which includes that sense but no other senses of question, we find the following links between question and the verbs ask, inquire, chal. lenge, and dispute: (question1, asks), (questiou~, asks) , (questions, asks) , (questions, inquire~), and (question1, challenge~) . Thus, if question is placed in the same semantic group with ask and inquire, the three senses {1, 2, 3} survive out of the five senses of question, with a preference for sense 3. If, on the other hand, question is classified with challenge and dispute, only sense 1 survives.", |
| "cite_spans": [ |
| { |
| "start": 435, |
| "end": 452, |
| "text": "(questiou~, asks)", |
| "ref_id": null |
| }, |
| { |
| "start": 455, |
| "end": 472, |
| "text": "(questions, asks)", |
| "ref_id": null |
| }, |
| { |
| "start": 475, |
| "end": 525, |
| "text": "(questions, inquire~), and (question1, challenge~)", |
| "ref_id": null |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "3", |
| "sec_num": null |
| }, |
| { |
| "text": "We performed an experiment analyzing a specific verb group produced by one semantic clustering program (McMahon and Smith, 1996) . This group contains 19 verbs, all but one of them ambiguous, including ask, call, charge, regard, say, and wish.", |
| "cite_spans": [ |
| { |
| "start": 103, |
| "end": 128, |
| "text": "(McMahon and Smith, 1996)", |
| "ref_id": "BIBREF17" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "3", |
| "sec_num": null |
| }, |
| { |
| "text": "We measured for each sense of the 19 words how many of the other words have at least one sense linked with that sense in WordNet (in the same toplevel verb sense tree). The results, part of which is shown in Table 2 , indicate that some senses are much more strongly connected with the other words in the group, and so probably predominate in the corpus that was used to induce the group. For example, one of the senses of ask, \"require\" (as in This job asks (for) long hours) is not linked to any of the other 18 words in the cluster, and should therefore be removed. If, for each word w we analyze, we require that each of its probable senses be linked to at least a fixed percentage (e.g., one-third) of the total number of words linked to to, we can eliminate Table 3 : Reduction in ambiguity and sense tagging error rate for the cluster-based method, as measured for five verbs on the J part of the Brown corpus.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 208, |
| "end": 215, |
| "text": "Table 2", |
| "ref_id": "TABREF1" |
| }, |
| { |
| "start": 764, |
| "end": 771, |
| "text": "Table 3", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "3", |
| "sec_num": null |
| }, |
| { |
| "text": "many of the senses as improbable. The achieved reduction in ambiguity (for the 18 ambiguous words) ranges from 20% to 84.62% (including cases of full disambiguation), and its average for all 18 words is 55.89%.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "3", |
| "sec_num": null |
| }, |
| { |
| "text": "In another experiment, we looked at a specific corpus, taking into account the frequency distribution of the verbs in it. We selected the J part of the Brown corpus, which focuses on learned knowledge (the Natural Sciences, Mathematics, Medicine, the Humanities, etc.) (Ku~era and Francis, 1967) . This part of the corpus is more homogeneous and contains a larger number of articles (80). The increased homogeneity makes it suitable for investigating our hypothesis of predominant verb senses.", |
| "cite_spans": [ |
| { |
| "start": 269, |
| "end": 295, |
| "text": "(Ku~era and Francis, 1967)", |
| "ref_id": "BIBREF12" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "3", |
| "sec_num": null |
| }, |
| { |
| "text": "We selected five verbs from this sub-corpus (show, describe, present, prove, and introduce) , and applied our algorithm assuming that the predominant senses of these verbs are linked together and consequently, that the five verbs would be placed in the same group by the clustering program.", |
| "cite_spans": [ |
| { |
| "start": 44, |
| "end": 91, |
| "text": "(show, describe, present, prove, and introduce)", |
| "ref_id": null |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "3", |
| "sec_num": null |
| }, |
| { |
| "text": "Under this assumption, we measured the reduction in ambiguity (number of possible senses) for each verb (types) as well as over all occurrences of the five verbs in the sub-corpus (tokens) when the cluster-based algorithm is applied. We also counted how many of the verbs receive a wrong tag, i.e., a set of senses that does not include the hand-assigued one. The results of these experiments are shown in Table 3 . We observe that the cluster-based method achieves a 49.27% reduction in the number of senses -when measured on types. When the distribution of the words is factored in, the corresponding measure on tokens (which better describes the applicability of the method in practice) is 38.00%. The average error rate is 8.48%; this average is driven up by the inclusion of present, prove, and introduce in our test set. The relatively high error rate for these verbs may be due to their low frequency in our corpus, or may indicate that their predominant senses axe not associated with the predominant senses of show and describe as we hypothesized.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 406, |
| "end": 413, |
| "text": "Table 3", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "3", |
| "sec_num": null |
| }, |
| { |
| "text": "Combining the two methods", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "4", |
| "sec_num": null |
| }, |
| { |
| "text": "While the syntactic constraints method almost always produces a semantic tag that includes the correct sense for a verb, 3 it has no capability to further distinguish the surviving senses in the tag. The semantic link-based method, on the other hand, can eliminate some senses from this tag. By applying the two methods in tandem and intersecting the sense sets produced by them, we can reduce the size of the final tag. Using the verb \"show\" of the experiment described in the previous section as an illustration, we note that whenever the verb takes only a direct object, the syntactic method eliminates three of the thirteen possible senses while always retaining the ZAssuming no gaps in the subcategorization information for this verb in COMLEX and WordNet. correct sense in the produced tag (error rate 0%). For the same verb and subcal~.egorization pattern, the cluster-based method rejects four of the thirteen senses with error rate 5% (i.e, 3 out of 58 occurrences in the Y part of the Brown Corpus will be assigned wrong tags). The intersection of the two methods increases the number of rejected senses to five. It reduces the ambiguity by 38% but has the combined error rate of both methods, in this case 5%.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "4", |
| "sec_num": null |
| }, |
| { |
| "text": "As we see from this experiment, the integration of the two methods can improve the reduction rate of ambiguity, but may slightly increase the error rate. We are investigating ways to stratify the application of the cluster-based method on appropriate groups of tokens identified by the syntactic method, by separately clustering tokens of the same verb that appear in different syntactic frames. We expect that this will partly alleviate the increase in the error rate.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "4", |
| "sec_num": null |
| }, |
| { |
| "text": "Our method for using detailed knowledge about verb subcategorizations and alternations to prune verb senses is domain independent. It also prunes senses without loss of correctness. By intersecting the resulting sense sets with the output of our clusterbased method, verb senses can be pruned further. In using the clustering method's output, we make two further assumptions. Previous work has shown that within a given discourse (Gale et al., 1992) , or with respect to a given collocation (Yarowsky, 1993) , a word appears in only one sense. By extrapolation, we will assume that words appear in only one sense within a homogeneous corpus, 4 except for certain high frequency verbs or for semantically empty support verbs. We will assign this predominant sense to all non-disambignated occurrences of a verb. While this provides a reasonable default, the resulting semantic tag has to be considered provisional, and validated independently. Also, we currently assume that words placed in the same group will share relatively few links (connecting pairs of competing senses) in WordNet. This is supported by our initial experiments, but is an issue we will continue to investigate.", |
| "cite_spans": [ |
| { |
| "start": 430, |
| "end": 449, |
| "text": "(Gale et al., 1992)", |
| "ref_id": "BIBREF3" |
| }, |
| { |
| "start": 491, |
| "end": 507, |
| "text": "(Yarowsky, 1993)", |
| "ref_id": "BIBREF30" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Discussion", |
| "sec_num": "5" |
| }, |
| { |
| "text": "Above we gave some preliminary evaluation results; we plan to carry out a more complete evaluation of our system by continuing to use the handtagged (with WordNet senses) Brown corpus (Miller et al., 1993) as the initial evaluation standard. Each stage will be separately measured, as well as their combined effectiveness in pruning senses. We anticipate that the use of multiple methods to investigate sense pruning will lead to more robust results. In addition, we believe that the two methods can be interleaved in the following manner: Both methods rely tOt a few predominant senses, that can perhaps be disambigu&ted using syntactic constraints as we discuss below.", |
| "cite_spans": [ |
| { |
| "start": 184, |
| "end": 205, |
| "text": "(Miller et al., 1993)", |
| "ref_id": "BIBREF19" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Discussion", |
| "sec_num": "5" |
| }, |
| { |
| "text": "on recognizing features of the local syntactic context of a verb occurrence; the look-up method uses the local syntactic context to identify the likely subcategorization pattern while the automatic classification method uses the local syntactic context to extract marker words. The look-up method can tag distinct tokens of the same verb with distinct senses if the subcategorization patterns are distinct and correlate with distinct senses. The automatic classification method could be extended to classify sense sets, using as its input corpus the output of the syntactic constraints look-up method, where verb tokens have been tagged with a subset of the full collection of senses. In principle, this would make it possible to use the automatic classification method on a more heterogeneous corpus, i.e., where the same verb occurs frequently with two distinct senses.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Discussion", |
| "sec_num": "5" |
| } |
| ], |
| "back_matter": [ |
| { |
| "text": "Number of other words in group linked with given sense $2 83 54 55 86 $7 58 59 S10 Sll S12 S13 9 9 9 0 9 0 0 9 1 9 9 1 9 2 0 9 9 3 3 2 0 0 2 3 0 2 3 1 9 3 0 0 0 0 0 '9 5 1 1 5 0 0 0 I say 9 2 0 9 0 9 1 9 wish 7 2 2 1 2 2 9 9 ", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 113, |
| "end": 277, |
| "text": "9 9 1 9 2 0 9 9 3 3 2 0 0 2 3 0 2 3 1 9 3 0 0 0 0 0 '9 5 1 1 5 0 0 0 I say 9 2 0 9 0 9 1 9 wish 7 2 2 1 2 2 9 9", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "annex", |
| "sec_num": null |
| } |
| ], |
| "bib_entries": { |
| "BIBREF0": { |
| "ref_id": "b0", |
| "title": "The TRAINS project: A case study in defining a conversational planning agent", |
| "authors": [ |
| { |
| "first": "James", |
| "middle": [ |
| "F" |
| ], |
| "last": "Allen", |
| "suffix": "" |
| }, |
| { |
| "first": "Lenhart", |
| "middle": [ |
| "K" |
| ], |
| "last": "Schubert", |
| "suffix": "" |
| }, |
| { |
| "first": "George", |
| "middle": [], |
| "last": "Fergnson", |
| "suffix": "" |
| }, |
| { |
| "first": "Peter", |
| "middle": [], |
| "last": "Heeman", |
| "suffix": "" |
| }, |
| { |
| "first": "Chung", |
| "middle": [ |
| "Hee" |
| ], |
| "last": "Hwang", |
| "suffix": "" |
| }, |
| { |
| "first": "Tsuneaki", |
| "middle": [], |
| "last": "Kato", |
| "suffix": "" |
| }, |
| { |
| "first": "Marc", |
| "middle": [], |
| "last": "Light", |
| "suffix": "" |
| }, |
| { |
| "first": "Nathaniel", |
| "middle": [ |
| "G" |
| ], |
| "last": "Martin", |
| "suffix": "" |
| }, |
| { |
| "first": "Bradford", |
| "middle": [ |
| "W" |
| ], |
| "last": "Miller", |
| "suffix": "" |
| }, |
| { |
| "first": "Massimo", |
| "middle": [], |
| "last": "Poesio", |
| "suffix": "" |
| }, |
| { |
| "first": "David", |
| "middle": [ |
| "R" |
| ], |
| "last": "Tranm", |
| "suffix": "" |
| } |
| ], |
| "year": 1995, |
| "venue": "/Ezperimental and Theoretical AI", |
| "volume": "7", |
| "issue": "", |
| "pages": "7--48", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "James F. Allen, Lenhart K. Schubert, George Fergn- son, Peter Heeman, Chung Hee Hwang, Tsuneaki Kato, Marc Light, Nathaniel G. Martin, Brad- ford W. Miller, Massimo Poesio, and David R. Tranm. 1995. The TRAINS project: A case study in defining a conversational planning agent. Jour- nal o/Ezperimental and Theoretical AI, 7:7-48.", |
| "links": null |
| }, |
| "BIBREF1": { |
| "ref_id": "b1", |
| "title": "Using register-diversified corpora for general language studies", |
| "authors": [ |
| { |
| "first": "Douglas", |
| "middle": [], |
| "last": "Biber", |
| "suffix": "" |
| } |
| ], |
| "year": 1993, |
| "venue": "June. Special Issue on Using Large Corpora: II", |
| "volume": "19", |
| "issue": "2", |
| "pages": "219--242", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Douglas Biber. 1993. Using register-diversified cor- pora for general language studies. Computational Linguistics, 19(2):219-242, June. Special Issue on Using Large Corpora: II.", |
| "links": null |
| }, |
| "BIBREF2": { |
| "ref_id": "b2", |
| "title": "Class-based n-gram models of natural language", |
| "authors": [ |
| { |
| "first": "F", |
| "middle": [], |
| "last": "Peter", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Brown", |
| "suffix": "" |
| }, |
| { |
| "first": "J", |
| "middle": [], |
| "last": "Vincent", |
| "suffix": "" |
| }, |
| { |
| "first": "Peter", |
| "middle": [ |
| "V" |
| ], |
| "last": "Della Pietra", |
| "suffix": "" |
| }, |
| { |
| "first": "Jennifer", |
| "middle": [ |
| "C" |
| ], |
| "last": "De Souza", |
| "suffix": "" |
| }, |
| { |
| "first": "Robert", |
| "middle": [ |
| "L" |
| ], |
| "last": "Lai", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Mercer", |
| "suffix": "" |
| } |
| ], |
| "year": 1992, |
| "venue": "Computational Linguistics", |
| "volume": "18", |
| "issue": "4", |
| "pages": "467--479", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Peter F. Brown, Vincent J. della Pietra, Peter V. de Souza, Jennifer C. Lai, and Robert L. Mercer. 1992. Class-based n-gram models of natural lan- guage. Computational Linguistics, 18(4):467-479.", |
| "links": null |
| }, |
| "BIBREF3": { |
| "ref_id": "b3", |
| "title": "One sense per discourse", |
| "authors": [ |
| { |
| "first": "William", |
| "middle": [ |
| "A" |
| ], |
| "last": "Gale", |
| "suffix": "" |
| }, |
| { |
| "first": "Kenneth", |
| "middle": [ |
| "W" |
| ], |
| "last": "Church", |
| "suffix": "" |
| }, |
| { |
| "first": "David", |
| "middle": [], |
| "last": "Yarowsky", |
| "suffix": "" |
| } |
| ], |
| "year": 1992, |
| "venue": "Proceedings of the ~th DARPA Speech and Natural Language Workshop", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "William A. Gale, Kenneth W. Church, and David Yarowsky. 1992. One sense per discourse. In Proceedings of the ~th DARPA Speech and Natural Language Workshop, February.", |
| "links": null |
| }, |
| "BIBREF4": { |
| "ref_id": "b4", |
| "title": "COMLEX syntax: Building a computational lexicon", |
| "authors": [ |
| { |
| "first": "Ralph", |
| "middle": [], |
| "last": "Grishman", |
| "suffix": "" |
| }, |
| { |
| "first": "Catherine", |
| "middle": [], |
| "last": "Macleod", |
| "suffix": "" |
| }, |
| { |
| "first": "Adam", |
| "middle": [], |
| "last": "Meyers", |
| "suffix": "" |
| } |
| ], |
| "year": 1994, |
| "venue": "Proceedings o/COLING-9~", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Ralph Grishman, Catherine Macleod, and Adam Meyers. 1994. COMLEX syntax: Building a com- putational lexicon. In Proceedings o/COLING-9~, Kyoto, Japan, August.", |
| "links": null |
| }, |
| "BIBREF5": { |
| "ref_id": "b5", |
| "title": "Towards the automatic identification of adjectival scales: Clustering adjectives according to meaning", |
| "authors": [ |
| { |
| "first": "Vasileios", |
| "middle": [], |
| "last": "Hatzivassiloglou", |
| "suffix": "" |
| }, |
| { |
| "first": "Kathleen", |
| "middle": [], |
| "last": "Mckeown", |
| "suffix": "" |
| } |
| ], |
| "year": 1993, |
| "venue": "Proceedings of the 31st Annual Meeting o/the Association for Computational Linguistics", |
| "volume": "", |
| "issue": "", |
| "pages": "172--182", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Vasileios Hatzivassiloglou and Kathleen McKeown. 1993. Towards the automatic identification of ad- jectival scales: Clustering adjectives according to meaning. In Proceedings of the 31st Annual Meet- ing o/the Association for Computational Linguis- tics, pages 172-182, Columbus, Ohio, June.", |
| "links": null |
| }, |
| "BIBREF6": { |
| "ref_id": "b6", |
| "title": "Do we need linguistics when we have statistics? A comparative analysis of the contributions of linguistic cues to a statistical word grouping system", |
| "authors": [ |
| { |
| "first": "Vasileios", |
| "middle": [], |
| "last": "Hatzivassiloglou", |
| "suffix": "" |
| } |
| ], |
| "year": 1996, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Vasileios Hatzivassiloglou. 1996. Do we need lin- guistics when we have statistics? A comparative analysis of the contributions of linguistic cues to a statistical word grouping system. In Judith L.", |
| "links": null |
| }, |
| "BIBREF7": { |
| "ref_id": "b7", |
| "title": "The Balancing Act: Combining Symbolic and Statistical Approaches to Language", |
| "authors": [ |
| { |
| "first": "Philip", |
| "middle": [ |
| "S" |
| ], |
| "last": "Klavans", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Resnik", |
| "suffix": "" |
| } |
| ], |
| "year": null, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "67--94", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Klavans and Philip S. Resnik, editors, The Bal- ancing Act: Combining Symbolic and Statistical Approaches to Language, pages 67-94. The MIT Press, Cambridge, Massachusetts.", |
| "links": null |
| }, |
| "BIBREF8": { |
| "ref_id": "b8", |
| "title": "Multi-paragraph segmentation of expository text", |
| "authors": [ |
| { |
| "first": "A", |
| "middle": [], |
| "last": "Mufti", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Hearst", |
| "suffix": "" |
| } |
| ], |
| "year": 1994, |
| "venue": "Proceedings of the 3$nd Annual Meeting of the Association for Computational Linguistics", |
| "volume": "", |
| "issue": "", |
| "pages": "9--16", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Mufti A. Hearst. 1994. Multi-paragraph segmenta- tion of expository text. In Proceedings of the 3$nd Annual Meeting of the Association for Computa- tional Linguistics, pages 9-16, Las Cruces, New Mexico.", |
| "links": null |
| }, |
| "BIBREF9": { |
| "ref_id": "b9", |
| "title": "A prosodic analysis of discourse segments in direction-giving monologues", |
| "authors": [ |
| { |
| "first": "Julia", |
| "middle": [], |
| "last": "Hirschberg", |
| "suffix": "" |
| }, |
| { |
| "first": "Christine", |
| "middle": [ |
| "H" |
| ], |
| "last": "Nakatani", |
| "suffix": "" |
| } |
| ], |
| "year": 1996, |
| "venue": "Proceedings of the 34th Annual Meeting of the Association for Computational Linguistics", |
| "volume": "", |
| "issue": "", |
| "pages": "286--293", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Julia Hirschberg and Christine H. Nakatani. 1996. A prosodic analysis of discourse segments in direction-giving monologues. In Proceedings of the 34th Annual Meeting of the Association for Computational Linguistics, pages 286-293, Santa Cruz, California, June.", |
| "links": null |
| }, |
| "BIBREF10": { |
| "ref_id": "b10", |
| "title": "Building a rich large-scale lexteal base for generation", |
| "authors": [ |
| { |
| "first": "Hongyan", |
| "middle": [], |
| "last": "Jing", |
| "suffix": "" |
| }, |
| { |
| "first": "Kathleen", |
| "middle": [], |
| "last": "Mekeown", |
| "suffix": "" |
| }, |
| { |
| "first": "Rebecca", |
| "middle": [], |
| "last": "Passonneau", |
| "suffix": "" |
| } |
| ], |
| "year": 1997, |
| "venue": "Submitted to the 35th Annual Meeting of the Association for Computational Linguistics", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Hongyan Jing, Kathleen MeKeown, and Rebecca Passonneau. 1997. Building a rich large-scale lex- teal base for generation. Submitted to the 35th Annual Meeting of the Association for Computa- tional Linguistics.", |
| "links": null |
| }, |
| "BIBREF11": { |
| "ref_id": "b11", |
| "title": "Sublanguage: Studies of Language in Restricted Semantic Domains", |
| "authors": [ |
| { |
| "first": "R", |
| "middle": [], |
| "last": "Kittredge", |
| "suffix": "" |
| }, |
| { |
| "first": "J", |
| "middle": [], |
| "last": "Lehrberger", |
| "suffix": "" |
| } |
| ], |
| "year": 1982, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "R. Kittredge and J. Lehrberger, editors. 1982. Sub- language: Studies of Language in Restricted Se- mantic Domains. De Gruyter, Berlin.", |
| "links": null |
| }, |
| "BIBREF12": { |
| "ref_id": "b12", |
| "title": "Computational Analysis of Present-Day American English", |
| "authors": [ |
| { |
| "first": "Henry", |
| "middle": [], |
| "last": "Ku~era", |
| "suffix": "" |
| }, |
| { |
| "first": "W. Nelson", |
| "middle": [], |
| "last": "Francis", |
| "suffix": "" |
| } |
| ], |
| "year": 1967, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Henry Ku~era and W. Nelson Francis. 1967. Com- putational Analysis of Present-Day American En- glish. Brown University Press, Providence, Rhode Island.", |
| "links": null |
| }, |
| "BIBREF13": { |
| "ref_id": "b13", |
| "title": "Semantic Fields and Lezical Structure", |
| "authors": [ |
| { |
| "first": "Adrienne", |
| "middle": [], |
| "last": "Lehrer", |
| "suffix": "" |
| } |
| ], |
| "year": 1974, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Adrienne Lehrer. 1974. Semantic Fields and Lezical Structure. North Holland, Amsterdam and New York.", |
| "links": null |
| }, |
| "BIBREF14": { |
| "ref_id": "b14", |
| "title": "English Verb Classes and Alternations: A Preliminary Investigation", |
| "authors": [ |
| { |
| "first": "Beth", |
| "middle": [], |
| "last": "Levin", |
| "suffix": "" |
| } |
| ], |
| "year": 1993, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Beth Levin. 1993. English Verb Classes and Alter- nations: A Preliminary Investigation. University of Chicago Press, Chicago, Illinois.", |
| "links": null |
| }, |
| "BIBREF16": { |
| "ref_id": "b16", |
| "title": "The influence of tagging on the classification of lexical complements", |
| "authors": [ |
| { |
| "first": "Catherine", |
| "middle": [], |
| "last": "Macleod", |
| "suffix": "" |
| }, |
| { |
| "first": "Adam", |
| "middle": [], |
| "last": "Meyers", |
| "suffix": "" |
| }, |
| { |
| "first": "Ralph", |
| "middle": [], |
| "last": "Grishman", |
| "suffix": "" |
| } |
| ], |
| "year": 1996, |
| "venue": "Proceedings of COLING-96", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Catherine Macleod, Adam Meyers, and Ralph Gr- ishman. 1996. The influence of tagging on the classification of lexical complements. In Proceed- ings of COLING-96, Copenhagen, Denmark.", |
| "links": null |
| }, |
| "BIBREF17": { |
| "ref_id": "b17", |
| "title": "Improving statistical language model performance with automatically generated word hierarchies", |
| "authors": [ |
| { |
| "first": "G", |
| "middle": [], |
| "last": "John", |
| "suffix": "" |
| }, |
| { |
| "first": "Francis", |
| "middle": [ |
| "J" |
| ], |
| "last": "Mcmahon", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Smith", |
| "suffix": "" |
| } |
| ], |
| "year": 1996, |
| "venue": "Computational Linguistics", |
| "volume": "22", |
| "issue": "2", |
| "pages": "217--247", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "John G. McMahon and Francis J. Smith. 1996. Im- proving statistical language model performance with automatically generated word hierarchies. Computational Linguistics, 22(2):217-247, June.", |
| "links": null |
| }, |
| "BIBREF18": { |
| "ref_id": "b18", |
| "title": "Introduction to WordNet: An on-line lexical database", |
| "authors": [ |
| { |
| "first": "George", |
| "middle": [ |
| "A" |
| ], |
| "last": "Miller", |
| "suffix": "" |
| }, |
| { |
| "first": "Richard", |
| "middle": [], |
| "last": "Beckwith", |
| "suffix": "" |
| }, |
| { |
| "first": "Christiane", |
| "middle": [], |
| "last": "Fellbaum", |
| "suffix": "" |
| }, |
| { |
| "first": "Derek", |
| "middle": [], |
| "last": "Gross", |
| "suffix": "" |
| }, |
| { |
| "first": "Katherine", |
| "middle": [ |
| "J" |
| ], |
| "last": "Miller", |
| "suffix": "" |
| } |
| ], |
| "year": 1990, |
| "venue": "International Journal of Lexicography (special issue)", |
| "volume": "3", |
| "issue": "4", |
| "pages": "235--312", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "George A. Miller, Richard Beckwith, Christiane Fell- baum, Derek Gross, and Katherine J. Miller. 1990. Introduction to WordNet: An on-line lexi- cal database. International Journal of Lexicogra- phy (special issue), 3(4):235-312.", |
| "links": null |
| }, |
| "BIBREF19": { |
| "ref_id": "b19", |
| "title": "A semantic concordance", |
| "authors": [ |
| { |
| "first": "George", |
| "middle": [ |
| "A" |
| ], |
| "last": "Miller", |
| "suffix": "" |
| }, |
| { |
| "first": "Claudia", |
| "middle": [], |
| "last": "Leacock", |
| "suffix": "" |
| }, |
| { |
| "first": "Randee", |
| "middle": [], |
| "last": "Tengi", |
| "suffix": "" |
| }, |
| { |
| "first": "Ross", |
| "middle": [ |
| "T" |
| ], |
| "last": "Bunker", |
| "suffix": "" |
| } |
| ], |
| "year": 1993, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "George A. Miller, Claudia Leacock, Randee Tengi, and Ross T. Bunker. 1993. A semantic concor- dance. Cognitive Science Laboratory, Princeton University.", |
| "links": null |
| }, |
| "BIBREF20": { |
| "ref_id": "b20", |
| "title": "Lexical cohesion computed by thesaural relations as an indicator of the structure of text", |
| "authors": [ |
| { |
| "first": "Jane", |
| "middle": [], |
| "last": "Morris", |
| "suffix": "" |
| }, |
| { |
| "first": "Graeme", |
| "middle": [], |
| "last": "Hirst", |
| "suffix": "" |
| } |
| ], |
| "year": 1991, |
| "venue": "Computational Linguistics", |
| "volume": "17", |
| "issue": "1", |
| "pages": "21--48", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Jane Morris and Graeme Hirst. 1991. Lexical co- hesion computed by thesaural relations as an in- dicator of the structure of text. Computational Linguistics, 17(1):21-48.", |
| "links": null |
| }, |
| "BIBREF21": { |
| "ref_id": "b21", |
| "title": "Investigating cue selection and placement in tutorial discourse", |
| "authors": [ |
| { |
| "first": "Megan", |
| "middle": [], |
| "last": "Moser", |
| "suffix": "" |
| }, |
| { |
| "first": "Johanna", |
| "middle": [ |
| "D" |
| ], |
| "last": "Moore", |
| "suffix": "" |
| } |
| ], |
| "year": 1995, |
| "venue": "Proceedings of the 33rd Annual Meeting of the Association for Computational Linguistics", |
| "volume": "", |
| "issue": "", |
| "pages": "130--135", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Megan Moser and Johanna D. Moore. 1995. Investi- gating cue selection and placement in tutorial dis- course. In Proceedings of the 33rd Annual Meeting of the Association for Computational Linguistics, pages 130-135, Cambridge, Massachusetts, June.", |
| "links": null |
| }, |
| "BIBREF22": { |
| "ref_id": "b22", |
| "title": "Forthcoming. Combining multiple knowledge sources for discourse segmentation", |
| "authors": [ |
| { |
| "first": "Rebecca", |
| "middle": [ |
| "J" |
| ], |
| "last": "Passonneau", |
| "suffix": "" |
| }, |
| { |
| "first": "Diane", |
| "middle": [ |
| "J" |
| ], |
| "last": "Litman", |
| "suffix": "" |
| } |
| ], |
| "year": null, |
| "venue": "Computational Linguistics. Special Issue on Empirical Studies in Discourse Interpretation and Generation", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Rebecca J. Passonneau and Diane J. Litman. Forth- coming. Combining multiple knowledge sources for discourse segmentation. Computational Lin- guistics. Special Issue on Empirical Studies in Discourse Interpretation and Generation.", |
| "links": null |
| }, |
| "BIBREF23": { |
| "ref_id": "b23", |
| "title": "Generating summaries of work flow diagrams", |
| "authors": [ |
| { |
| "first": "Rebecca", |
| "middle": [ |
| "J" |
| ], |
| "last": "Passonneau", |
| "suffix": "" |
| }, |
| { |
| "first": "Karen", |
| "middle": [ |
| "K" |
| ], |
| "last": "Kukich", |
| "suffix": "" |
| }, |
| { |
| "first": "Jacques", |
| "middle": [], |
| "last": "Robin", |
| "suffix": "" |
| }, |
| { |
| "first": "Vasileios", |
| "middle": [], |
| "last": "Hatzivassiloglou", |
| "suffix": "" |
| } |
| ], |
| "year": 1996, |
| "venue": "Proceedings of the International Conference on Natural Language Processing and Industrial Applications", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Rebecca J. Passonneau, Karen K. Kukich, Jacques Robin, Vasileios Hatzivassiloglou, Larry Lefko- witz, and Hongyan Jing. 1996. Generating summaries of work flow diagrams. In Proceed- ings of the International Conference on Natu- ral Language Processing and Industrial Applica- tions, New Brunswick, Canada, June. University of Moncton.", |
| "links": null |
| }, |
| "BIBREF24": { |
| "ref_id": "b24", |
| "title": "Using centering to relax informational constraints on discourse anaphorie noun phrases", |
| "authors": [ |
| { |
| "first": "Rebecca", |
| "middle": [ |
| "J" |
| ], |
| "last": "Passonneau", |
| "suffix": "" |
| } |
| ], |
| "year": 1996, |
| "venue": "Language and Speech", |
| "volume": "39", |
| "issue": "2-3", |
| "pages": "229--264", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Rebecca J. Passonneau. 1996. Using centering to relax informational constraints on discourse anaphorie noun phrases. Language and Speech, 39(2-3):229-264, April-September. Special Dou- ble Issue on Discourse and Syntax.", |
| "links": null |
| }, |
| "BIBREF25": { |
| "ref_id": "b25", |
| "title": "Distributional clustering of English words", |
| "authors": [ |
| { |
| "first": "Fernando", |
| "middle": [], |
| "last": "Pereira", |
| "suffix": "" |
| }, |
| { |
| "first": "Naftali", |
| "middle": [], |
| "last": "Tishby", |
| "suffix": "" |
| }, |
| { |
| "first": "Lillian", |
| "middle": [], |
| "last": "Lee", |
| "suffix": "" |
| } |
| ], |
| "year": 1993, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Fernando Pereira, Naftali Tishby, and Lillian Lee. 1993. Distributional clustering of English words.", |
| "links": null |
| }, |
| "BIBREF26": { |
| "ref_id": "b26", |
| "title": "Proceedings of the 31st Annual Meeting of the Association for Computational Linguistics", |
| "authors": [], |
| "year": null, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "183--190", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "In Proceedings of the 31st Annual Meeting of the Association for Computational Linguistics, pages 183-190, Columbus, Ohio, June.", |
| "links": null |
| }, |
| "BIBREF27": { |
| "ref_id": "b27", |
| "title": "Lexical semantic techniques for corpus analysis", |
| "authors": [ |
| { |
| "first": "James", |
| "middle": [], |
| "last": "Pustejovsky", |
| "suffix": "" |
| }, |
| { |
| "first": "Sabine", |
| "middle": [], |
| "last": "Bergler", |
| "suffix": "" |
| }, |
| { |
| "first": "Peter", |
| "middle": [], |
| "last": "Anick", |
| "suffix": "" |
| } |
| ], |
| "year": 1993, |
| "venue": "June. Special Issue on Using Large Corpora: II", |
| "volume": "19", |
| "issue": "2", |
| "pages": "331--359", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "James Pustejovsky, Sabine Bergler, and Peter An- ick. 1993. Lexical semantic techniques for corpus analysis. Computational Linguistics, 19(2):331- 359, June. Special Issue on Using Large Corpora: II.", |
| "links": null |
| }, |
| "BIBREF28": { |
| "ref_id": "b28", |
| "title": "Using information content to evaluate semantic similarity in a taxonomy", |
| "authors": [ |
| { |
| "first": "Philip", |
| "middle": [], |
| "last": "Resnik", |
| "suffix": "" |
| } |
| ], |
| "year": 1995, |
| "venue": "Proceedings of the Fourteenth International Joint Conference on Artificial Intelligence (IJCAI-gs)", |
| "volume": "1", |
| "issue": "", |
| "pages": "448--453", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Philip Resnik. 1995. Using information content to evaluate semantic similarity in a taxonomy. In Proceedings of the Fourteenth International Joint Conference on Artificial Intelligence (IJCAI-gs), volume 1, pages 448-453, Montreal, Quebec, Canada, August. Morgan Kaufmann, San Mateo, California.", |
| "links": null |
| }, |
| "BIBREF29": { |
| "ref_id": "b29", |
| "title": "Revision-Based Generation of Natural Language Summaries Providing Historical Background: Corpus-Based Analysis, Design, Implementation, and Evaluation", |
| "authors": [ |
| { |
| "first": "Jacques", |
| "middle": [], |
| "last": "Robin", |
| "suffix": "" |
| } |
| ], |
| "year": 1994, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Jacques Robin. 1994. Revision-Based Generation of Natural Language Summaries Providing Historical Background: Corpus-Based Analysis, Design, Im- plementation, and Evaluation. Ph.D. thesis, De- partment of Computer Science, Columbia Univer- sity, New York. Also Technical Report CU-CS- 034-94.", |
| "links": null |
| }, |
| "BIBREF30": { |
| "ref_id": "b30", |
| "title": "ARPA Software and Intelligent Systems Technology Office", |
| "authors": [ |
| { |
| "first": "David", |
| "middle": [], |
| "last": "Yarowsky", |
| "suffix": "" |
| } |
| ], |
| "year": 1993, |
| "venue": "Proceedings of the ARPA Workshop on Human Language Technology", |
| "volume": "", |
| "issue": "", |
| "pages": "266--271", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "David Yarowsky. 1993. One sense per collocation. In Proceedings of the ARPA Workshop on Human Language Technology, pages 266-271, Plainsboro, New Jersey, March. ARPA Software and Intelli- gent Systems Technology Office, Morgan Kauf- mann, San Francisco, California.", |
| "links": null |
| } |
| }, |
| "ref_entries": { |
| "FIGREF0": { |
| "text": "Levin's classification of verbs in terms of their allowed alternations", |
| "type_str": "figure", |
| "num": null, |
| "uris": null |
| }, |
| "FIGREF1": { |
| "text": "Database information for the verb appear.", |
| "type_str": "figure", |
| "num": null, |
| "uris": null |
| }, |
| "FIGREF2": { |
| "text": "Automatically synthesized lexicon entry for the verb appear.", |
| "type_str": "figure", |
| "num": null, |
| "uris": null |
| }, |
| "FIGREF3": { |
| "text": "Figure 3is a bar chart showing, for each number of senses from 1 to 41, how many verbs with that number of senses occur Valid combinations of syntactic subcategorization frames/alternations and senses (marked with +) for the verb appear. Distribution of verbs according to number of senses. Low frequencies are not drawn to scale;", |
| "type_str": "figure", |
| "num": null, |
| "uris": null |
| }, |
| "TABREF1": { |
| "content": "<table><tr><td>Verb</td><td>Number of senses in WordNet</td><td>Surviving senses after cluster-based method is applied</td><td>Reduction in ambiguity (typical)</td><td>Occurrences in the corpus (tokens)</td><td>Wrongly tagged tokens</td><td>Error rate</td></tr><tr><td>show</td><td>13</td><td>9</td><td>30.77%</td><td>109</td><td/><td>3.67%</td></tr><tr><td>describe</td><td/><td/><td>50.00%</td><td>32</td><td/><td>3.12%</td></tr><tr><td>present</td><td>12</td><td>6</td><td>50.00%</td><td>8</td><td/><td>12.50%</td></tr><tr><td>prof}e</td><td>9</td><td>4</td><td>55.56%</td><td>10</td><td/><td>50.00%</td></tr><tr><td>introduce</td><td>10</td><td>4</td><td>60.00%</td><td>6</td><td/><td>50.00%</td></tr><tr><td>IW,_i,L~ ~,, ,--~,</td><td>o]1~,</td><td>s</td><td>4.q 27\u00b07,</td><td>33</td><td/><td/></tr></table>", |
| "text": "Number of words in a semantic group linked with each sense of each word in it, and associated reduction in ambiguity. Eight of the 19 words are shown.", |
| "type_str": "table", |
| "html": null, |
| "num": null |
| } |
| } |
| } |
| } |