| { |
| "paper_id": "P10-1003", |
| "header": { |
| "generated_with": "S2ORC 1.0.0", |
| "date_generated": "2023-01-19T09:22:37.062344Z" |
| }, |
| "title": "Bitext Dependency Parsing with Bilingual Subtree Constraints", |
| "authors": [ |
| { |
| "first": "Wenliang", |
| "middle": [], |
| "last": "Chen", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "Language Infrastructure Group", |
| "institution": "MASTAR Project National Institute of Information and Communications Technology", |
| "location": { |
| "addrLine": "3-5 Hikari-dai, Seika-cho, Soraku-gun", |
| "postCode": "619-0289", |
| "settlement": "Kyoto", |
| "country": "Japan" |
| } |
| }, |
| "email": "chenwl@nict.go.jp" |
| }, |
| { |
| "first": "Jun'ichi", |
| "middle": [], |
| "last": "Kazama", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "Language Infrastructure Group", |
| "institution": "MASTAR Project National Institute of Information and Communications Technology", |
| "location": { |
| "addrLine": "3-5 Hikari-dai, Seika-cho, Soraku-gun", |
| "postCode": "619-0289", |
| "settlement": "Kyoto", |
| "country": "Japan" |
| } |
| }, |
| "email": "" |
| }, |
| { |
| "first": "Kentaro", |
| "middle": [], |
| "last": "Torisawa", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "Language Infrastructure Group", |
| "institution": "MASTAR Project National Institute of Information and Communications Technology", |
| "location": { |
| "addrLine": "3-5 Hikari-dai, Seika-cho, Soraku-gun", |
| "postCode": "619-0289", |
| "settlement": "Kyoto", |
| "country": "Japan" |
| } |
| }, |
| "email": "torisawa@nict.go.jp" |
| } |
| ], |
| "year": "", |
| "venue": null, |
| "identifiers": {}, |
| "abstract": "This paper proposes a dependency parsing method that uses bilingual constraints to improve the accuracy of parsing bilingual texts (bitexts). In our method, a targetside tree fragment that corresponds to a source-side tree fragment is identified via word alignment and mapping rules that are automatically learned. Then it is verified by checking the subtree list that is collected from large scale automatically parsed data on the target side. Our method, thus, requires gold standard trees only on the source side of a bilingual corpus in the training phase, unlike the joint parsing model, which requires gold standard trees on the both sides. Compared to the reordering constraint model, which requires the same training data as ours, our method achieved higher accuracy because of richer bilingual constraints. Experiments on the translated portion of the Chinese Treebank show that our system outperforms monolingual parsers by 2.93 points for Chinese and 1.64 points for English.", |
| "pdf_parse": { |
| "paper_id": "P10-1003", |
| "_pdf_hash": "", |
| "abstract": [ |
| { |
| "text": "This paper proposes a dependency parsing method that uses bilingual constraints to improve the accuracy of parsing bilingual texts (bitexts). In our method, a targetside tree fragment that corresponds to a source-side tree fragment is identified via word alignment and mapping rules that are automatically learned. Then it is verified by checking the subtree list that is collected from large scale automatically parsed data on the target side. Our method, thus, requires gold standard trees only on the source side of a bilingual corpus in the training phase, unlike the joint parsing model, which requires gold standard trees on the both sides. Compared to the reordering constraint model, which requires the same training data as ours, our method achieved higher accuracy because of richer bilingual constraints. Experiments on the translated portion of the Chinese Treebank show that our system outperforms monolingual parsers by 2.93 points for Chinese and 1.64 points for English.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Abstract", |
| "sec_num": null |
| } |
| ], |
| "body_text": [ |
| { |
| "text": "Parsing bilingual texts (bitexts) is crucial for training machine translation systems that rely on syntactic structures on either the source side or the target side, or the both (Ding and Palmer, 2005; Nakazawa et al., 2006) . Bitexts could provide more information, which is useful in parsing, than a usual monolingual texts that can be called \"bilingual constraints\", and we expect to obtain more accurate parsing results that can be effectively used in the training of MT systems. With this motivation, there are several studies aiming at highly accurate bitext parsing (Smith and Smith, 2004; Burkett and Klein, 2008; . This paper proposes a dependency parsing method, which uses the bilingual constraints that we call bilingual subtree constraints and statistics concerning the constraints estimated from large unlabeled monolingual corpora. Basically, a (candidate) dependency subtree in a source-language sentence is mapped to a subtree in the corresponding target-language sentence by using word alignment and mapping rules that are automatically learned. The target subtree is verified by checking the subtree list that is collected from unlabeled sentences in the target language parsed by a usual monolingual parser. The result is used as additional features for the source side dependency parser. In this paper, our task is to improve the source side parser with the help of the translations on the target side.", |
| "cite_spans": [ |
| { |
| "start": 178, |
| "end": 201, |
| "text": "(Ding and Palmer, 2005;", |
| "ref_id": "BIBREF5" |
| }, |
| { |
| "start": 202, |
| "end": 224, |
| "text": "Nakazawa et al., 2006)", |
| "ref_id": "BIBREF14" |
| }, |
| { |
| "start": 573, |
| "end": 596, |
| "text": "(Smith and Smith, 2004;", |
| "ref_id": "BIBREF18" |
| }, |
| { |
| "start": 597, |
| "end": 621, |
| "text": "Burkett and Klein, 2008;", |
| "ref_id": "BIBREF1" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "Many researchers have investigated the use of bilingual constraints for parsing (Burkett and Klein, 2008; Zhao et al., 2009; . For example, Burkett and Klein (2008) show that parsing with joint models on bitexts improves performance on either or both sides. However, their methods require that the training data have tree structures on both sides, which are hard to obtain. Our method only requires dependency annotation on the source side and is much simpler and faster. proposes a method, bilingual-constrained monolingual parsing, in which a source-language parser is extended to use the re-ordering of words between two sides' sentences as additional information. The input of their method is the source trees with their translation on the target side as ours, which is much easier to obtain than trees on both sides. However, their method does not use any tree structures on the target side that might be useful for ambiguity resolution. Our method achieves much greater improvement because it uses the richer subtree constraints.", |
| "cite_spans": [ |
| { |
| "start": 80, |
| "end": 105, |
| "text": "(Burkett and Klein, 2008;", |
| "ref_id": "BIBREF1" |
| }, |
| { |
| "start": 106, |
| "end": 124, |
| "text": "Zhao et al., 2009;", |
| "ref_id": "BIBREF21" |
| }, |
| { |
| "start": 140, |
| "end": 164, |
| "text": "Burkett and Klein (2008)", |
| "ref_id": "BIBREF1" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "Our approach takes the same input as and exploits the subtree structure on the target side to provide the bilingual constraints. The subtrees are extracted from large-scale autoparsed monolingual data on the target side. The main problem to be addressed is mapping words on the source side to the target subtree because there are many to many mappings and reordering problems that often occur in translation (Koehn et al., 2003) . We use an automatic way for generating mapping rules to solve the problems. Based on the mapping rules, we design a set of features for parsing models. The basic idea is as follows: if the words form a subtree on one side, their corresponding words on the another side will also probably form a subtree.", |
| "cite_spans": [ |
| { |
| "start": 408, |
| "end": 428, |
| "text": "(Koehn et al., 2003)", |
| "ref_id": "BIBREF9" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "Experiments on the translated portion of the Chinese Treebank (Xue et al., 2002; Bies et al., 2007) show that our system outperforms state-ofthe-art monolingual parsers by 2.93 points for Chinese and 1.64 points for English. The results also show that our system provides higher accuracies than the parser of .", |
| "cite_spans": [ |
| { |
| "start": 62, |
| "end": 80, |
| "text": "(Xue et al., 2002;", |
| "ref_id": "BIBREF19" |
| }, |
| { |
| "start": 81, |
| "end": 99, |
| "text": "Bies et al., 2007)", |
| "ref_id": "BIBREF0" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "The rest of the paper is organized as follows: Section 2 introduces the motivation of our idea. Section 3 introduces the background of dependency parsing. Section 4 proposes an approach of constructing bilingual subtree constraints. Section 5 explains the experimental results. Finally, in Section 6 we draw conclusions and discuss future work.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "In this section, we use an example to show the idea of using the bilingual subtree constraints to improve parsing performance.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Motivation", |
| "sec_num": "2" |
| }, |
| { |
| "text": "Suppose that we have an input sentence pair as shown in Figure 1 , where the source sentence is in English, the target is in Chinese, the dashed undirected links are word alignment links, and the directed links between words indicate that they have a (candidate) dependency relation.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 56, |
| "end": 64, |
| "text": "Figure 1", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Motivation", |
| "sec_num": "2" |
| }, |
| { |
| "text": "In the English side, it is difficult for a parser to determine the head of word \"with\" because there is a PP-attachment problem. However, in Chinese it is unambiguous. Therefore, we can use the information on the Chinese side to help disambigua-He ate the meat with a fork .", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Motivation", |
| "sec_num": "2" |
| }, |
| { |
| "text": "(He) (use) (fork) (eat) (meat) (.) Figure 1 : Example for disambiguation tion. There are two candidates \"ate\" and \"meat\" to be the head of \"with\" as the dashed directed links in Figure 1 show. By adding \"fork\", we have two possible dependency relations, \"meat-with-fork\" and \"ate-with-fork\", to be verified. First, we check the possible relation of \"meat\", \"with\", and \"fork\". We obtain their corresponding words \"\u8089(meat)\", \"\u7528(use)\", and \"\u53c9\u5b50(fork)\" in Chinese via the word alignment links. We verify that the corresponding words form a subtree by looking up a subtree list in Chinese (described in Section 4.1). But we can not find a subtree for them.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 35, |
| "end": 43, |
| "text": "Figure 1", |
| "ref_id": null |
| }, |
| { |
| "start": 178, |
| "end": 186, |
| "text": "Figure 1", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Motivation", |
| "sec_num": "2" |
| }, |
| { |
| "text": "Next, we check the possible relation of \"ate\", \"with\", and \"fork\". We obtain their corresponding words \"\u5403(ate)\", \"\u7528(use)\", and \"\u53c9\u5b50(fork)\". Then we verify that the words form a subtree by looking up the subtree list. This time we can find the subtree as shown in Figure 2 . Finally, the parser may assign \"ate\" to be the head of \"with\" based on the verification results. This simple example shows how to use the subtree information on the target side.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 262, |
| "end": 270, |
| "text": "Figure 2", |
| "ref_id": "FIGREF0" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Motivation", |
| "sec_num": "2" |
| }, |
| { |
| "text": "For dependency parsing, there are two main types of parsing models (Nivre and McDonald, 2008; Nivre and Kubler, 2006) : transition-based (Nivre, 2003; Yamada and Matsumoto, 2003) and graphbased (McDonald et al., 2005; Carreras, 2007) . Our approach can be applied to both parsing models.", |
| "cite_spans": [ |
| { |
| "start": 67, |
| "end": 93, |
| "text": "(Nivre and McDonald, 2008;", |
| "ref_id": "BIBREF16" |
| }, |
| { |
| "start": 94, |
| "end": 117, |
| "text": "Nivre and Kubler, 2006)", |
| "ref_id": "BIBREF15" |
| }, |
| { |
| "start": 137, |
| "end": 150, |
| "text": "(Nivre, 2003;", |
| "ref_id": "BIBREF17" |
| }, |
| { |
| "start": 151, |
| "end": 178, |
| "text": "Yamada and Matsumoto, 2003)", |
| "ref_id": "BIBREF20" |
| }, |
| { |
| "start": 194, |
| "end": 217, |
| "text": "(McDonald et al., 2005;", |
| "ref_id": "BIBREF13" |
| }, |
| { |
| "start": 218, |
| "end": 233, |
| "text": "Carreras, 2007)", |
| "ref_id": "BIBREF2" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Dependency parsing", |
| "sec_num": "3" |
| }, |
| { |
| "text": "In this paper, we employ the graph-based MST parsing model proposed by McDonald and Pereira (2006) , which is an extension of the projective parsing algorithm of Eisner (1996) . To use richer second-order information, we also implement parent-child-grandchild features (Carreras, 2007) in the MST parsing algorithm. Figure 3 shows an example of dependency parsing. In the graph-based parsing model, features are represented for all the possible relations on single edges (two words) or adjacent edges (three words). The parsing algorithm chooses the tree with the highest score in a bottom-up fashion.", |
| "cite_spans": [ |
| { |
| "start": 71, |
| "end": 98, |
| "text": "McDonald and Pereira (2006)", |
| "ref_id": "BIBREF12" |
| }, |
| { |
| "start": 162, |
| "end": 175, |
| "text": "Eisner (1996)", |
| "ref_id": "BIBREF6" |
| }, |
| { |
| "start": 269, |
| "end": 285, |
| "text": "(Carreras, 2007)", |
| "ref_id": "BIBREF2" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 316, |
| "end": 324, |
| "text": "Figure 3", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Dependency parsing", |
| "sec_num": "3" |
| }, |
| { |
| "text": "ROOT He ate the meat with a fork .", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Parsing with monolingual features", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "In our systems, the monolingual features include the first-and second-order features presented in (McDonald et al., 2005; McDonald and Pereira, 2006) and the parent-child-grandchild features used in (Carreras, 2007) . We call the parser with the monolingual features monolingual parser.", |
| "cite_spans": [ |
| { |
| "start": 98, |
| "end": 121, |
| "text": "(McDonald et al., 2005;", |
| "ref_id": "BIBREF13" |
| }, |
| { |
| "start": 122, |
| "end": 149, |
| "text": "McDonald and Pereira, 2006)", |
| "ref_id": "BIBREF12" |
| }, |
| { |
| "start": 199, |
| "end": 215, |
| "text": "(Carreras, 2007)", |
| "ref_id": "BIBREF2" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Figure 3: Example of dependency tree", |
| "sec_num": null |
| }, |
| { |
| "text": "In this paper, we parse source sentences with the help of their translations. A set of bilingual features are designed for the parsing model.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Parsing with bilingual features", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "We design bilingual subtree features, as described in Section 4, based on the constraints between the source subtrees and the target subtrees that are verified by the subtree list on the target side. The source subtrees are from the possible dependency relations. propose features based on reordering between languages for a shift-reduce parser. They define the features based on wordalignment information to verify that the corresponding words form a contiguous span for resolving shift-reduce conflicts. We also implement similar features in our system.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Bilingual subtree features", |
| "sec_num": "3.2.1" |
| }, |
| { |
| "text": "In this section, we propose an approach that uses the bilingual subtree constraints to help parse source sentences that have translations on the target side.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Bilingual subtree constraints", |
| "sec_num": "4" |
| }, |
| { |
| "text": "We use large-scale auto-parsed data to obtain subtrees on the target side. Then we generate the mapping rules to map the source subtrees onto the extracted target subtrees. Finally, we design the bilingual subtree features based on the mapping rules for the parsing model. These features indicate the information of the constraints between bilingual subtrees, that are called bilingual subtree constraints.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Bilingual subtree constraints", |
| "sec_num": "4" |
| }, |
| { |
| "text": "Chen et al. 2009propose a simple method to extract subtrees from large-scale monolingual data and use them as features to improve monolingual parsing. Following their method, we parse large unannotated data with a monolingual parser and obtain a set of subtrees (ST t ) in the target language.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Subtree extraction", |
| "sec_num": "4.1" |
| }, |
| { |
| "text": "We encode the subtrees into string format that is expressed as st = w : hid(\u2212w : hid)+ 1 , where w refers to a word in the subtree and hid refers to the word ID of the word's head (hid=0 means that this word is the root of a subtree). Here, word ID refers to the ID (starting from 1) of a word in the subtree (words are ordered based on the positions of the original sentence). For example, \"He\" and \"ate\" have a left dependency arc in the sentence shown in Figure 3 . The subtree is encoded as \"He:2ate:0\". There is also a parent-child-grandchild relation among \"ate\", \"with\", and \"fork\". So the subtree is encoded as \"ate:0-with:1-fork:2\". If a subtree contains two nodes, we call it a bigramsubtree. If a subtree contains three nodes, we call it a trigram-subtree.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 458, |
| "end": 466, |
| "text": "Figure 3", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Subtree extraction", |
| "sec_num": "4.1" |
| }, |
| { |
| "text": "From the dependency tree of Figure 3 , we obtain the subtrees, as shown in Figure 4 and Figure 5 . Figure 4 shows the extracted bigram-subtrees and Figure 5 shows the extracted trigram-subtrees. After extraction, we obtain a set of subtrees. We remove the subtrees occurring only once in the data. Following Chen et al. (2009), we also group the subtrees into different sets based on their frequencies. with:1:0-NULL:2:1-fork:3:1 ate:1:0-the:2:3-meat:3:1 ate:1:0-with:2:1-fork:3:2 with:1:0-a:2:3-fork:3:1 NULL:1:2-He:2:3-ate:3:0 He:1:3-NULL:2:1-ate:3:0 ate:1:0-meat:2:1-NULL:3:2 ate:1:0-NULL:2:3-with:3:1 with:1:0-fork:2:1-NULL:3:2 NULL:1:2-a:2:3-fork:3:0 a:1:3-NULL:2:1-fork:3:0 ate:1:0-NULL:2:3-.:3:1 ate:1:0-.:2:1-NULL:3:2 (b) NULL:1:2-the:2:3-meat:3:0 the:1:3-NULL:2:1-meat:3:0 ", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 28, |
| "end": 36, |
| "text": "Figure 3", |
| "ref_id": null |
| }, |
| { |
| "start": 75, |
| "end": 83, |
| "text": "Figure 4", |
| "ref_id": null |
| }, |
| { |
| "start": 88, |
| "end": 97, |
| "text": "Figure 5", |
| "ref_id": "FIGREF2" |
| }, |
| { |
| "start": 100, |
| "end": 108, |
| "text": "Figure 4", |
| "ref_id": null |
| }, |
| { |
| "start": 149, |
| "end": 157, |
| "text": "Figure 5", |
| "ref_id": "FIGREF2" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Subtree extraction", |
| "sec_num": "4.1" |
| }, |
| { |
| "text": "To provide bilingual subtree constraints, we need to find the characteristics of subtree mapping for the two given languages. However, subtree mapping is not easy. There are two main problems: MtoN (words) mapping and reordering, which often occur in translation. MtoN (words) mapping means that a source subtree with M words is mapped onto a target subtree with N words. For example, 2to3 means that a source bigram-subtree is mapped onto a target trigram-subtree.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Mapping rules", |
| "sec_num": "4.2" |
| }, |
| { |
| "text": "Due to the limitations of the parsing algorithm (McDonald and Pereira, 2006; Carreras, 2007) , we only use bigram-and trigram-subtrees in our approach. We generate the mapping rules for the 2to2, 2to3, 3to3, and 3to2 cases. For trigram-subtrees, we only consider the parentchild-grandchild type. As for the use of other types of trigram-subtrees, we leave it for future work.", |
| "cite_spans": [ |
| { |
| "start": 48, |
| "end": 76, |
| "text": "(McDonald and Pereira, 2006;", |
| "ref_id": "BIBREF12" |
| }, |
| { |
| "start": 77, |
| "end": 92, |
| "text": "Carreras, 2007)", |
| "ref_id": "BIBREF2" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Mapping rules", |
| "sec_num": "4.2" |
| }, |
| { |
| "text": "We first show the MtoN and reordering problems by using an example in Chinese-English translation. Then we propose a method to automatically generate mapping rules.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Mapping rules", |
| "sec_num": "4.2" |
| }, |
| { |
| "text": "translation Both Chinese and English are classified as SVO languages because verbs precede objects in simple sentences. However, Chinese has many characteristics of such SOV languages as Japanese. The typical cases are listed below: 1) Prepositional phrases modifying a verb precede the verb. Figure 6 shows an example. In English the prepositional phrase \"at the ceremony\" follows the verb \"said\", while its corresponding prepositional phrase \"\u5728(NULL) \u4eea\u5f0f(ceremony) \u4e0a(at)\" precedes the verb \"\u8bf4(say)\" in Chinese. Figure 7 shows an example. In Chinese the relative clause \"\u4eca\u5929(today) \u7b7e\u5b57(signed)\" precedes the head noun \"\u9879\u76ee(project)\", while its corresponding clause \"signed today\" follows the head noun \"projects\" in English.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 293, |
| "end": 301, |
| "text": "Figure 6", |
| "ref_id": "FIGREF3" |
| }, |
| { |
| "start": 512, |
| "end": 520, |
| "text": "Figure 7", |
| "ref_id": "FIGREF4" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Reordering and MtoN mapping in", |
| "sec_num": "4.2.1" |
| }, |
| { |
| "text": "The 3 projects signed today 3) Genitive constructions precede head noun. For example, \"\u6c7d \u8f66(car) \u8f6e \u5b50(wheel)\" can be translated as \"the wheel of the car\". 4) Postposition in many constructions rather than prepositions. For example, \"\u684c \u5b50(table) \u4e0a(on)\" can be translated as \"on the table\".", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Said at the ceremony", |
| "sec_num": null |
| }, |
| { |
| "text": "We can find the MtoN mapping problem occurring in the above cases. For example, in Figure 6 , trigram-subtree \"\u5728(NULL):3-\u4e0a(at):1-\u8bf4(say):0\" is mapped onto bigram-subtree \"said:0-at:1\".", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 83, |
| "end": 91, |
| "text": "Figure 6", |
| "ref_id": "FIGREF3" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Said at the ceremony", |
| "sec_num": null |
| }, |
| { |
| "text": "Since asking linguists to define the mapping rules is very expensive, we propose a simple method to easily obtain the mapping rules.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Said at the ceremony", |
| "sec_num": null |
| }, |
| { |
| "text": "To solve the mapping problems, we use a bilingual corpus, which includes sentence pairs, to automatically generate the mapping rules. First, the sentence pairs are parsed by monolingual parsers on both sides. Then we perform word alignment using a word-level aligner (Liang et al., 2006; DeNero and Klein, 2007) . Figure 8 shows an example of a processed sentence pair that has tree structures on both sides and word alignment links. From these sentence pairs, we obtain subtree pairs. First, we extract a subtree (st s ) from a source sentence. Then through word alignment links, we obtain the corresponding words of the words of st s . Because of the MtoN problem, some words lack of corresponding words in the target sentence. Here, our approach requires that at least two words of st s have corresponding words and nouns and verbs need corresponding words. If not, it fails to find a subtree pair for st s . If the corresponding words form a subtree (st t ) in the target sentence, st s and st t are a subtree pair. We also keep the word alignment information in the target subtree. For example, we extract subtree \"\u793e \u4f1a(society):2-\u8fb9\u7f18(fringe):0\" on the Chinese side and get its corresponding subtree \"fringes(W 2):0of:1-society(W 1):2\" on the English side, where W 1 means that the target word is aligned to the first word of the source subtree, and W 2 means that the target word is aligned to the second word of the source subtree. That is, we have a sub-tree pair: \"\u793e \u4f1a(society):2-\u8fb9 \u7f18(fringe):0\" and \"fringe(W 2):0-of:1-society(W 1):2\".", |
| "cite_spans": [ |
| { |
| "start": 267, |
| "end": 287, |
| "text": "(Liang et al., 2006;", |
| "ref_id": "BIBREF11" |
| }, |
| { |
| "start": 288, |
| "end": 311, |
| "text": "DeNero and Klein, 2007)", |
| "ref_id": "BIBREF4" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 314, |
| "end": 322, |
| "text": "Figure 8", |
| "ref_id": "FIGREF6" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Bilingual subtree mapping", |
| "sec_num": "4.2.2" |
| }, |
| { |
| "text": "The extracted subtree pairs indicate the translation characteristics between Chinese and English. For example, the pair \"\u793e \u4f1a(society):2-\u8fb9 \u7f18(fringe):0\" and \"fringes:0-of:1-society:2\" is a case where \"Genitive constructions precede/follow the head noun\".", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Bilingual subtree mapping", |
| "sec_num": "4.2.2" |
| }, |
| { |
| "text": "To increase the mapping coverage, we generalize the mapping rules from the extracted subtree pairs by using the following procedure. The rules are divided by \"=>\" into two parts: source (left) and target (right). The source part is from the source subtree and the target part is from the target subtree. For the source part, we replace nouns and verbs using their POS tags (coarse grained tags). For the target part, we use the word alignment information to represent the target words that have corresponding source words. For example, we have the subtree pair: \"\u793e \u4f1a(society):2-\u8fb9 \u7f18(fringe):0\" and \"fringes(W 2):0-of:1-society(W 1):2\", where \"of\" does not have a corresponding word, the POS tag of \"\u793e\u4f1a(society)\" is N, and the POS tag of \"\u8fb9\u7f18(fringe)\" is N. The source part of the rule becomes \"N:2-N:0\" and the target part becomes \"W 2:0-of:1-W 1:2\". Table 1 shows the top five mapping rules of all four types ordered by their frequencies, where W 1 means that the target word is aligned to the first word of the source subtree, W 2 means that the target word is aligned to the second word, and W 3 means that the target word is aligned to the third word. We remove the rules that occur less than three times. Finally, we obtain 9,134 rules for 2to2, 5,335 for 2to3, 7,450 for 3to3, and 1,244 for 3to2 from our data. After experiments with different threshold settings on the development data sets, we use the top 20 rules for each type in our experiments.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 849, |
| "end": 856, |
| "text": "Table 1", |
| "ref_id": "TABREF0" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Generalized mapping rules", |
| "sec_num": "4.2.3" |
| }, |
| { |
| "text": "The generalized mapping rules might generate incorrect target subtrees. However, as described in Section 4.3.1, the generated subtrees are verified by looking up list ST t before they are used in the parsing models.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Generalized mapping rules", |
| "sec_num": "4.2.3" |
| }, |
| { |
| "text": "Informally, if the words form a subtree on the source side, then the corresponding words on the target side will also probably form a subtree. For # rules freq 2to2 mapping 1 N:2 N:0 => W 1:2 W 2:0 92776 2 V:0 N:1 => W 1:0 W 2:1 62437 3 V:0 V:1 => W 1:0 W 2:1 49633 4 N:2 V:0 => W 1:2 W 2:0 43999 5 \u7684:2 N:0 => W 2:0 W 1:2 25301 2to3 mapping 1 N:2-N:0 => W 2:0-of:1-W 1:2 10361 2 V:0-N:1 => W 1:0-of:1-W 2:2 4521 3 V:0-N:1 => W 1:0-to:1-W 2:2 2917 4 N:2-V:0 => W 2:0-of:1-W 1:2 2578 5 N:2-N:0 => W 1:2-':3-W 2:0 2316 3to2 mapping 1 V:2-\u7684/DEC:3-N:0 => W 1:0-W 3:1 873 2 V:2-\u7684/DEC:3-N:0 => W 3:2-W 1:0 634 3 N:2-\u7684/DEG:3-N:0 => W 1:0-W 3:1 319 4 N:2-\u7684/DEG:3-N:0 => W 3:2-W 1:0 301 5 V:0-\u7684/DEG:3-N:1 => W 3:0-W 1:1 247 3to3 mapping 1 V:0-V:1-N:2 => W 1:0-W 2:1-W 3:2 N:2-\u7684/DEG:3-N:0 => W 3:0-W 2:1-W 1:2 7010 3 V:0-N:3-N:1 => W 1:0-W 2:3-W 3:1 5642 4 V:0-V:1-V:2 => W 1:0-W 2:1-W 3:2 4563 5 N:2-N:3-N:0 => W 1:2-W 2:3-W 3:0 3570 Figure 8 , words \"\u4ed6 \u4eec(they)\" and \"\u5904\u4e8e(be on)\" form a subtree , which is mapped onto the words \"they\" and \"are\" on the target side. These two target words form a subtree. We now develop this idea as bilingual subtree features.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 924, |
| "end": 932, |
| "text": "Figure 8", |
| "ref_id": "FIGREF6" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Bilingual subtree features", |
| "sec_num": "4.3" |
| }, |
| { |
| "text": "In the parsing process, we build relations for two or three words on the source side. The conditions of generating bilingual subtree features are that at least two of these source words must have corresponding words on the target side and nouns and verbs must have corresponding words.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Bilingual subtree features", |
| "sec_num": "4.3" |
| }, |
| { |
| "text": "At first, we have a possible dependency relation (represented as a source subtree) of words to be verified. Then we obtain the corresponding target subtree based on the mapping rules. Finally, we verify that the target subtree is included in ST t . If yes, we activate a positive feature to encourage the dependency relation.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Bilingual subtree features", |
| "sec_num": "4.3" |
| }, |
| { |
| "text": "Those are the 3 projects signed today Those are the 3 projects signed today We consider four types of features based on 2to2, 3to3, 3to2, and 2to3 mappings. In the 2to2, 3to3, and 3to2 cases, the target subtrees do not add new words. We represent features in a direct way. For the 2to3 case, we represent features using a different strategy.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Bilingual subtree features", |
| "sec_num": "4.3" |
| }, |
| { |
| "text": "We design the features based on the mapping rules of 2to2, 3to3, and 3to2. For example, we design features for a 3to2 case from Figure 9 . The possible relation to be verified forms source subtree \"\u7b7e \u5b57(signed)/VV:2-\u7684(NULL)/DEC:3-\u9879 \u76ee(project)/NN:0\" in which \"\u9879 \u76ee(project)\" is aligned to \"projects\" and \"\u7b7e \u5b57(signed)\" is aligned to \"signed\" as shown in Figure 9 . The procedure of generating the features is shown in Figure 10 . We explain Steps (1), (2), (3), and (4) as follows:", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 128, |
| "end": 136, |
| "text": "Figure 9", |
| "ref_id": "FIGREF7" |
| }, |
| { |
| "start": 350, |
| "end": 358, |
| "text": "Figure 9", |
| "ref_id": "FIGREF7" |
| }, |
| { |
| "start": 414, |
| "end": 423, |
| "text": "Figure 10", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Features for 2to2, 3to3, and 3to2", |
| "sec_num": "4.3.1" |
| }, |
| { |
| "text": "EQUATION", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [ |
| { |
| "start": 0, |
| "end": 8, |
| "text": "EQUATION", |
| "ref_id": "EQREF", |
| "raw_str": "/VV:2-/DEC:3- /NN:0 projects(W_3) signed(W_1)", |
| "eq_num": "(1)" |
| } |
| ], |
| "section": "Features for 2to2, 3to3, and 3to2", |
| "sec_num": "4.3.1" |
| }, |
| { |
| "text": "V:2-/DEC:3-N:0 W_3:0-W_1:1 W 3:2 W 1:0", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Features for 2to2, 3to3, and 3to2", |
| "sec_num": "4.3.1" |
| }, |
| { |
| "text": "(2) W_3:2-W_1:0", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Features for 2to2, 3to3, and 3to2", |
| "sec_num": "4.3.1" |
| }, |
| { |
| "text": "(3) projects:0-signed:1 projects:2-signed:0 ST t", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Features for 2to2, 3to3, and 3to2", |
| "sec_num": "4.3.1" |
| }, |
| { |
| "text": "3to2:YES (4) Figure 10 : Example of feature generation for 3to2 case", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 13, |
| "end": 22, |
| "text": "Figure 10", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Features for 2to2, 3to3, and 3to2", |
| "sec_num": "4.3.1" |
| }, |
| { |
| "text": "(1) Generate source part from the source subtree. We obtain \"V:2-\u7684/DEC:3-N:0\" from \"\u7b7e \u5b57(signed)/VV:2-\u7684(NULL)/DEC:3-\u9879 \u76ee(project)/NN:0\".", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Features for 2to2, 3to3, and 3to2", |
| "sec_num": "4.3.1" |
| }, |
| { |
| "text": "(2) Obtain target parts based on the matched mapping rules, whose source parts equal \"V:2-\u7684/DEC:3-N:0\". The matched rules are \"V:2-\u7684/DEC:3-N:0 =>W 3:0-W 1:1\" and \"V:2-\u7684/DEC:3-N:0 => W 3:2-W 1:0\". Thus, we have two target parts \"W 3:0-W 1:1\" and \"W 3:2-W 1:0\".", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Features for 2to2, 3to3, and 3to2", |
| "sec_num": "4.3.1" |
| }, |
| { |
| "text": "(3) Generate possible subtrees by considering the dependency relation indicated in the target parts. We generate a possible subtree \"projects:0-signed:1\" from the target part \"W 3:0-W 1:1\", where \"projects\" is aligned to \"\u9879 \u76ee(project)(W 3)\" and \"signed\" is aligned to \"\u7b7e \u5b57(signed)(W 1)\". We also generate another possible subtree \"projects:2-signed:0\" from \"W 3:2-W 1:0\". (4) Verify that at least one of the generated possible subtrees is a target subtree, which is included in ST t . If yes, we activate this feature. In the figure, \"projects:0-signed:1\" is a target subtree in ST t . So we activate the feature \"3to2:YES\" to encourage dependency relations among \"\u7b7e \u5b57(signed)\", \"\u7684(NULL)\", and \"\u9879\u76ee(project)\".",
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Features for 2to2, 3to3, and 3to2", |
| "sec_num": "4.3.1" |
| }, |
| { |
| "text": "In the 2to3 case, a new word is added on the target side. The first two steps are identical to those in the previous section. For example, a source part \"N:2-N:0\" is generated from \"\u6c7d\u8f66(car)/NN:2-\u8f6e \u5b50(wheel)/NN:0\". Then we obtain target parts such as \"W 2:0-of/IN:1-W 1:2\", \"W 2:0-in/IN:1-W 1:2\", and so on, according to the matched mapping rules.",
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Features for 2to3", |
| "sec_num": "4.3.2" |
| }, |
| { |
| "text": "The third step is different. In the target parts, there is an added word. We first check if the added word is in the span of the corresponding words, which can be obtained through word alignment links. We can find that \"of\" is in the span \"wheel of the car\", which is the span of the corresponding words of \"\u6c7d \u8f66(car)/NN:2-\u8f6e \u5b50(wheel)/NN:0\". Then we choose the target part \"W 2:0-of/IN:1-W 1:2\" to generate a possible subtree. Finally, we verify that the subtree is a target subtree included in ST t . If yes, we say feature \"2to3:YES\" to encourage a dependency relation between \"\u6c7d \u8f66(car)\" and \"\u8f6e\u5b50(wheel)\".", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Features for 2to3", |
| "sec_num": "4.3.2" |
| }, |
| { |
| "text": "Chen et al. (2009) shows that the source subtree features (F src\u2212st ) significantly improve performance. The subtrees are obtained from the auto-parsed data on the source side. Then they are used to verify the possible dependency relations among source words.", |
| "cite_spans": [ |
| { |
| "start": 12, |
| "end": 18, |
| "text": "(2009)", |
| "ref_id": null |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Source subtree features", |
| "sec_num": "4.4" |
| }, |
| { |
| "text": "In our approach, we also use the same source subtree features described in Chen et al. (2009) . So the possible dependency relations are verified by the source and target subtrees. Combining two types of features together provides strong discrimination power. If both types of features are active, building relations is very likely among source words. If both are inactive, this is a strong negative signal for their relations.",
| "cite_spans": [ |
| { |
| "start": 75, |
| "end": 93, |
| "text": "Chen et al. (2009)", |
| "ref_id": "BIBREF3" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Source subtree features", |
| "sec_num": "4.4" |
| }, |
| { |
| "text": "All the bilingual data were taken from the translated portion of the Chinese Treebank (CTB) (Xue et al., 2002; Bies et al., 2007) , articles 1-325 of CTB, which have English translations with gold-standard parse trees. We used the tool \"Penn2Malt\" 2 to convert the data into dependency structures. Following the study of , we used the same split of this data: 1-270 for training, 301-325 for development, and 271-300 for test. Note that some sentence pairs were removed because they are not one-to-one aligned at the sentence level (Burkett and Klein, 2008; . Word alignments were generated from the Berkeley Aligner (Liang et al., 2006; DeNero and Klein, 2007) trained on a bilingual corpus having approximately 0.8M sentence pairs. We removed notoriously bad links in {a, an, the}\u00d7{\u7684(DE), \u4e86(LE)} following the work of .", |
| "cite_spans": [ |
| { |
| "start": 92, |
| "end": 110, |
| "text": "(Xue et al., 2002;", |
| "ref_id": "BIBREF19" |
| }, |
| { |
| "start": 111, |
| "end": 129, |
| "text": "Bies et al., 2007)", |
| "ref_id": "BIBREF0" |
| }, |
| { |
| "start": 532, |
| "end": 557, |
| "text": "(Burkett and Klein, 2008;", |
| "ref_id": "BIBREF1" |
| }, |
| { |
| "start": 617, |
| "end": 637, |
| "text": "(Liang et al., 2006;", |
| "ref_id": "BIBREF11" |
| }, |
| { |
| "start": 638, |
| "end": 661, |
| "text": "DeNero and Klein, 2007)", |
| "ref_id": "BIBREF4" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Experiments", |
| "sec_num": "5" |
| }, |
| { |
| "text": "For Chinese unannotated data, we used the XIN CMN portion of Chinese Gigaword Version 2.0 (LDC2009T14) (Huang, 2009) , which has approximately 311 million words whose segmentation and POS tags are given. To avoid unfair comparison, we excluded the sentences of the CTB data from the Gigaword data. We discarded the annotations because there are differences in annotation policy between CTB and this corpus. We used the MMA system (Kruengkrai et al., 2009) trained on the training data to perform word segmentation and POS tagging and used the Baseline Parser to parse all the sentences in the data. For English unannotated data, we used the BLLIP corpus that contains about 43 million words of WSJ text. The POS tags were assigned by the MXPOST tagger trained on training data. Then we used the Baseline Parser to parse all the sentences in the data.", |
| "cite_spans": [ |
| { |
| "start": 103, |
| "end": 116, |
| "text": "(Huang, 2009)", |
| "ref_id": "BIBREF8" |
| }, |
| { |
| "start": 430, |
| "end": 455, |
| "text": "(Kruengkrai et al., 2009)", |
| "ref_id": "BIBREF10" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Experiments", |
| "sec_num": "5" |
| }, |
| { |
| "text": "We reported the parser quality by the unlabeled attachment score (UAS), i.e., the percentage of tokens (excluding all punctuation tokens) with correct HEADs.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Experiments", |
| "sec_num": "5" |
| }, |
| { |
| "text": "The results on the Chinese-source side are shown in Table 2 , where \"Baseline\" refers to the systems with monolingual features, \"Baseline2\" refers to adding the reordering features to the Baseline, \"F BI \" refers to adding all the bilingual subtree features to \"Baseline2\", \"F src\u2212st \" refers to the monolingual parsing systems with source subtree features, \"Order-1\" refers to the first-order models, and \"Order-2\" refers to the second-order models. The results showed that the reordering features yielded an improvement of 0.53 and 0.58 points (UAS) for the first-and second-order models respectively. Then we added four types of bilingual constraint features one by one to \"Baseline2\". Note that the features based on 3to2 and 3to3 can not be applied to the first-order models, because they only consider single dependencies (bigram). That is, in the first model, F BI only includes the features based on 2to2 and 2to3. The results showed that the systems performed better and better. In total, we obtained an absolute improvement of 0.88 points (UAS) for the first-order model and 1.36 points for the second-order model by adding all the bilingual subtree features. Finally, the system with all the features (OURS) outperformed the Baseline by an absolute improvement of 3.12 points for the first-order model and 2.93 points for the second-order model. The improvements of the final systems (OURS) were significant in McNemar's Test (p < 10 \u22124 ).",
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 52, |
| "end": 59, |
| "text": "Table 2", |
| "ref_id": "TABREF2" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Main results", |
| "sec_num": "5.1" |
| }, |
| { |
| "text": "Order We also conducted experiments on the Englishsource side. Table 3 shows the results, where abbreviations are the same as in Table 2 . As in the Chinese experiments, the parsers with bilingual subtree features outperformed the Baselines. Finally, the systems (OURS) with all the features outperformed the Baselines by 1.30 points for the first-order model and 1.64 for the second-order model. The improvements of the final systems (OURS) were significant in McNemar's Test (p < 10 \u22123 ).", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 63, |
| "end": 70, |
| "text": "Table 3", |
| "ref_id": "TABREF4" |
| }, |
| { |
| "start": 129, |
| "end": 136, |
| "text": "Table 2", |
| "ref_id": "TABREF2" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Order-1", |
| "sec_num": null |
| }, |
| { |
| "text": "Order Table 4 shows the performance of the system we compared, where Huang2009 refers to the result of . The results showed that our system performed better than Huang2009. Compared with the approach of , our approach used additional large-scale autoparsed data. We did not compare our system with the joint model of Burkett and Klein (2008) ", |
| "cite_spans": [ |
| { |
| "start": 317, |
| "end": 341, |
| "text": "Burkett and Klein (2008)", |
| "ref_id": "BIBREF1" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 6, |
| "end": 13, |
| "text": "Table 4", |
| "ref_id": "TABREF6" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Order-1", |
| "sec_num": null |
| }, |
| { |
| "text": "We presented an approach using large automatically parsed monolingual data to provide bilingual subtree constraints to improve bitexts parsing. Our approach retains the efficiency of monolingual parsing and exploits the subtree structure on the target side. The experimental results show that the proposed approach is simple yet still provides significant improvements over the baselines in parsing accuracy. The results also show that our systems outperform the system of previous work on the same data. There are many ways in which this research could be continued. First, we may attempt to apply the bilingual subtree constraints to transition-based parsing models (Nivre, 2003; Yamada and Matsumoto, 2003) . Here, we may design new features for the models. Second, we may apply the proposed method for other language pairs such as Japanese-English and Chinese-Japanese. Third, larger unannotated data can be used to improve the performance further.",
| "cite_spans": [ |
| { |
| "start": 668, |
| "end": 681, |
| "text": "(Nivre, 2003;", |
| "ref_id": "BIBREF17" |
| }, |
| { |
| "start": 682, |
| "end": 709, |
| "text": "Yamada and Matsumoto, 2003)", |
| "ref_id": "BIBREF20" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Conclusion", |
| "sec_num": "6" |
| }, |
| { |
| "text": "+ refers to matching the preceding element one or more times and is the same as a regular expression in Perl.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| }, |
| { |
| "text": "http://w3.msi.vxu.se/\u02dcnivre/research/Penn2Malt.html", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| } |
| ], |
| "back_matter": [], |
| "bib_entries": { |
| "BIBREF0": { |
| "ref_id": "b0", |
| "title": "English Chinese translation treebank v 1.0", |
| "authors": [ |
| { |
| "first": "Ann", |
| "middle": [], |
| "last": "Bies", |
| "suffix": "" |
| }, |
| { |
| "first": "Martha", |
| "middle": [], |
| "last": "Palmer", |
| "suffix": "" |
| }, |
| { |
| "first": "Justin", |
| "middle": [], |
| "last": "Mott", |
| "suffix": "" |
| }, |
| { |
| "first": "Colin", |
| "middle": [], |
| "last": "Warner", |
| "suffix": "" |
| } |
| ], |
| "year": 2007, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "2007--2009", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Ann Bies, Martha Palmer, Justin Mott, and Colin Warner. 2007. English Chinese translation treebank v 1.0. In LDC2007T02.", |
| "links": null |
| }, |
| "BIBREF1": { |
| "ref_id": "b1", |
| "title": "Two languages are better than one (for syntactic parsing)", |
| "authors": [ |
| { |
| "first": "David", |
| "middle": [], |
| "last": "Burkett", |
| "suffix": "" |
| }, |
| { |
| "first": "Dan", |
| "middle": [], |
| "last": "Klein", |
| "suffix": "" |
| } |
| ], |
| "year": 2008, |
| "venue": "Proceedings of the 2008 Conference on Empirical Methods in Natural Language Processing", |
| "volume": "", |
| "issue": "", |
| "pages": "877--886", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "David Burkett and Dan Klein. 2008. Two languages are better than one (for syntactic parsing). In Pro- ceedings of the 2008 Conference on Empirical Meth- ods in Natural Language Processing, pages 877- 886, Honolulu, Hawaii, October. Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF2": { |
| "ref_id": "b2", |
| "title": "Experiments with a higher-order projective dependency parser", |
| "authors": [ |
| { |
| "first": "X", |
| "middle": [], |
| "last": "Carreras", |
| "suffix": "" |
| } |
| ], |
| "year": 2007, |
| "venue": "Proceedings of the CoNLL Shared Task Session of EMNLP-CoNLL", |
| "volume": "", |
| "issue": "", |
| "pages": "957--961", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "X. Carreras. 2007. Experiments with a higher-order projective dependency parser. In Proceedings of the CoNLL Shared Task Session of EMNLP-CoNLL 2007, pages 957-961.", |
| "links": null |
| }, |
| "BIBREF3": { |
| "ref_id": "b3", |
| "title": "Improving dependency parsing with subtrees from auto-parsed data", |
| "authors": [ |
| { |
| "first": "", |
| "middle": [], |
| "last": "Wl", |
| "suffix": "" |
| }, |
| { |
| "first": "J", |
| "middle": [], |
| "last": "Chen", |
| "suffix": "" |
| }, |
| { |
| "first": "K", |
| "middle": [], |
| "last": "Kazama", |
| "suffix": "" |
| }, |
| { |
| "first": "K", |
| "middle": [], |
| "last": "Uchimoto", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Torisawa", |
| "suffix": "" |
| } |
| ], |
| "year": 2009, |
| "venue": "Proceedings of the 2009 Conference on Empirical Methods in Natural Language Processing", |
| "volume": "", |
| "issue": "", |
| "pages": "570--579", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "WL. Chen, J. Kazama, K. Uchimoto, and K. Torisawa. 2009. Improving dependency parsing with subtrees from auto-parsed data. In Proceedings of the 2009 Conference on Empirical Methods in Natural Lan- guage Processing, pages 570-579, Singapore, Au- gust. Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF4": { |
| "ref_id": "b4", |
| "title": "Tailoring word alignments to syntactic machine translation", |
| "authors": [ |
| { |
| "first": "John", |
| "middle": [], |
| "last": "Denero", |
| "suffix": "" |
| }, |
| { |
| "first": "Dan", |
| "middle": [], |
| "last": "Klein", |
| "suffix": "" |
| } |
| ], |
| "year": 2007, |
| "venue": "Proceedings of the 45th Annual Meeting of the Association of Computational Linguistics", |
| "volume": "", |
| "issue": "", |
| "pages": "17--24", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "John DeNero and Dan Klein. 2007. Tailoring word alignments to syntactic machine translation. In Pro- ceedings of the 45th Annual Meeting of the Asso- ciation of Computational Linguistics, pages 17-24, Prague, Czech Republic, June. Association for Com- putational Linguistics.", |
| "links": null |
| }, |
| "BIBREF5": { |
| "ref_id": "b5", |
| "title": "Machine translation using probabilistic synchronous dependency insertion grammars", |
| "authors": [ |
| { |
| "first": "Yuan", |
| "middle": [], |
| "last": "Ding", |
| "suffix": "" |
| }, |
| { |
| "first": "Martha", |
| "middle": [], |
| "last": "Palmer", |
| "suffix": "" |
| } |
| ], |
| "year": 2005, |
| "venue": "ACL '05: Proceedings of the 43rd Annual Meeting on Association for Computational Linguistics", |
| "volume": "", |
| "issue": "", |
| "pages": "541--548", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Yuan Ding and Martha Palmer. 2005. Machine trans- lation using probabilistic synchronous dependency insertion grammars. In ACL '05: Proceedings of the 43rd Annual Meeting on Association for Computa- tional Linguistics, pages 541-548, Morristown, NJ, USA. Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF6": { |
| "ref_id": "b6", |
| "title": "Three new probabilistic models for dependency parsing: An exploration", |
| "authors": [ |
| { |
| "first": "J", |
| "middle": [], |
| "last": "Eisner", |
| "suffix": "" |
| } |
| ], |
| "year": 1996, |
| "venue": "Proc. of the 16th Intern. Conf. on Computational Linguistics (COLING)", |
| "volume": "", |
| "issue": "", |
| "pages": "340--345", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "J. Eisner. 1996. Three new probabilistic models for dependency parsing: An exploration. In Proc. of the 16th Intern. Conf. on Computational Linguistics (COLING), pages 340-345.", |
| "links": null |
| }, |
| "BIBREF7": { |
| "ref_id": "b7", |
| "title": "Bilingually-constrained (monolingual) shift-reduce parsing", |
| "authors": [ |
| { |
| "first": "Liang", |
| "middle": [], |
| "last": "Huang", |
| "suffix": "" |
| }, |
| { |
| "first": "Wenbin", |
| "middle": [], |
| "last": "Jiang", |
| "suffix": "" |
| }, |
| { |
| "first": "Qun", |
| "middle": [], |
| "last": "Liu", |
| "suffix": "" |
| } |
| ], |
| "year": 2009, |
| "venue": "Proceedings of the 2009 Conference on Empirical Methods in Natural Language Processing", |
| "volume": "", |
| "issue": "", |
| "pages": "1222--1231", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Liang Huang, Wenbin Jiang, and Qun Liu. 2009. Bilingually-constrained (monolingual) shift-reduce parsing. In Proceedings of the 2009 Conference on Empirical Methods in Natural Language Process- ing, pages 1222-1231, Singapore, August. Associ- ation for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF8": { |
| "ref_id": "b8", |
| "title": "Tagged Chinese Gigaword Version 2.0, LDC2009T14. Linguistic Data Consortium", |
| "authors": [ |
| { |
| "first": "Chu-Ren", |
| "middle": [], |
| "last": "Huang", |
| "suffix": "" |
| } |
| ], |
| "year": 2009, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Chu-Ren Huang. 2009. Tagged Chinese Gigaword Version 2.0, LDC2009T14. Linguistic Data Con- sortium.", |
| "links": null |
| }, |
| "BIBREF9": { |
| "ref_id": "b9", |
| "title": "Statistical phrase-based translation", |
| "authors": [ |
| { |
| "first": "P", |
| "middle": [], |
| "last": "Koehn", |
| "suffix": "" |
| }, |
| { |
| "first": "F", |
| "middle": [ |
| "J" |
| ], |
| "last": "Och", |
| "suffix": "" |
| }, |
| { |
| "first": "D", |
| "middle": [], |
| "last": "Marcu", |
| "suffix": "" |
| } |
| ], |
| "year": 2003, |
| "venue": "Proceedings of NAACL", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "P. Koehn, F.J. Och, and D. Marcu. 2003. Statistical phrase-based translation. In Proceedings of NAACL, page 54. Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF10": { |
| "ref_id": "b10", |
| "title": "An error-driven word-character hybrid model for joint Chinese word segmentation and POS tagging", |
| "authors": [ |
| { |
| "first": "Canasai", |
| "middle": [], |
| "last": "Kruengkrai", |
| "suffix": "" |
| }, |
| { |
| "first": "Kiyotaka", |
| "middle": [], |
| "last": "Uchimoto", |
| "suffix": "" |
| }, |
| { |
| "first": "Yiou", |
| "middle": [], |
| "last": "Jun'ichi Kazama", |
| "suffix": "" |
| }, |
| { |
| "first": "Kentaro", |
| "middle": [], |
| "last": "Wang", |
| "suffix": "" |
| }, |
| { |
| "first": "Hitoshi", |
| "middle": [], |
| "last": "Torisawa", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Isahara", |
| "suffix": "" |
| } |
| ], |
| "year": 2009, |
| "venue": "Proceedings of ACL-IJCNLP2009", |
| "volume": "", |
| "issue": "", |
| "pages": "513--521", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Canasai Kruengkrai, Kiyotaka Uchimoto, Jun'ichi Kazama, Yiou Wang, Kentaro Torisawa, and Hitoshi Isahara. 2009. An error-driven word-character hy- brid model for joint Chinese word segmentation and POS tagging. In Proceedings of ACL-IJCNLP2009, pages 513-521, Suntec, Singapore, August. Associ- ation for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF11": { |
| "ref_id": "b11", |
| "title": "Alignment by agreement", |
| "authors": [ |
| { |
| "first": "Percy", |
| "middle": [], |
| "last": "Liang", |
| "suffix": "" |
| }, |
| { |
| "first": "Ben", |
| "middle": [], |
| "last": "Taskar", |
| "suffix": "" |
| }, |
| { |
| "first": "Dan", |
| "middle": [], |
| "last": "Klein", |
| "suffix": "" |
| } |
| ], |
| "year": 2006, |
| "venue": "Proceedings of the Human Language Technology Conference of the NAACL, Main Conference", |
| "volume": "", |
| "issue": "", |
| "pages": "104--111", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Percy Liang, Ben Taskar, and Dan Klein. 2006. Align- ment by agreement. In Proceedings of the Human Language Technology Conference of the NAACL, Main Conference, pages 104-111, New York City, USA, June. Association for Computational Linguis- tics.", |
| "links": null |
| }, |
| "BIBREF12": { |
| "ref_id": "b12", |
| "title": "Online learning of approximate dependency parsing algorithms", |
| "authors": [ |
| { |
| "first": "R", |
| "middle": [], |
| "last": "Mcdonald", |
| "suffix": "" |
| }, |
| { |
| "first": "F", |
| "middle": [], |
| "last": "Pereira", |
| "suffix": "" |
| } |
| ], |
| "year": 2006, |
| "venue": "Proc. of EACL2006", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "R. McDonald and F. Pereira. 2006. Online learning of approximate dependency parsing algorithms. In Proc. of EACL2006.", |
| "links": null |
| }, |
| "BIBREF13": { |
| "ref_id": "b13", |
| "title": "Online large-margin training of dependency parsers", |
| "authors": [ |
| { |
| "first": "R", |
| "middle": [], |
| "last": "Mcdonald", |
| "suffix": "" |
| }, |
| { |
| "first": "K", |
| "middle": [], |
| "last": "Crammer", |
| "suffix": "" |
| }, |
| { |
| "first": "F", |
| "middle": [], |
| "last": "Pereira", |
| "suffix": "" |
| } |
| ], |
| "year": 2005, |
| "venue": "Proc. of ACL", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "R. McDonald, K. Crammer, and F. Pereira. 2005. On- line large-margin training of dependency parsers. In Proc. of ACL 2005.", |
| "links": null |
| }, |
| "BIBREF14": { |
| "ref_id": "b14", |
| "title": "Example-based machine translation based on deeper nlp", |
| "authors": [ |
| { |
| "first": "T", |
| "middle": [], |
| "last": "Nakazawa", |
| "suffix": "" |
| }, |
| { |
| "first": "K", |
| "middle": [], |
| "last": "Yu", |
| "suffix": "" |
| }, |
| { |
| "first": "D", |
| "middle": [], |
| "last": "Kawahara", |
| "suffix": "" |
| }, |
| { |
| "first": "S", |
| "middle": [], |
| "last": "Kurohashi", |
| "suffix": "" |
| } |
| ], |
| "year": 2006, |
| "venue": "Proceedings of IWSLT 2006", |
| "volume": "", |
| "issue": "", |
| "pages": "64--70", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "T. Nakazawa, K. Yu, D. Kawahara, and S. Kurohashi. 2006. Example-based machine translation based on deeper nlp. In Proceedings of IWSLT 2006, pages 64-70, Kyoto, Japan.", |
| "links": null |
| }, |
| "BIBREF15": { |
| "ref_id": "b15", |
| "title": "Dependency parsing: Tutorial at Coling-ACL 2006. In CoLING-ACL", |
| "authors": [ |
| { |
| "first": "J", |
| "middle": [], |
| "last": "Nivre", |
| "suffix": "" |
| }, |
| { |
| "first": "S", |
| "middle": [], |
| "last": "Kubler", |
| "suffix": "" |
| } |
| ], |
| "year": 2006, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "J. Nivre and S. Kubler. 2006. Dependency parsing: Tutorial at Coling-ACL 2006. In CoLING-ACL.", |
| "links": null |
| }, |
| "BIBREF16": { |
| "ref_id": "b16", |
| "title": "Integrating graphbased and transition-based dependency parsers", |
| "authors": [ |
| { |
| "first": "J", |
| "middle": [], |
| "last": "Nivre", |
| "suffix": "" |
| }, |
| { |
| "first": "R", |
| "middle": [], |
| "last": "Mcdonald", |
| "suffix": "" |
| } |
| ], |
| "year": 2008, |
| "venue": "Proceedings of ACL-08: HLT", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "J. Nivre and R. McDonald. 2008. Integrating graph- based and transition-based dependency parsers. In Proceedings of ACL-08: HLT, Columbus, Ohio, June.", |
| "links": null |
| }, |
| "BIBREF17": { |
| "ref_id": "b17", |
| "title": "An efficient algorithm for projective dependency parsing", |
| "authors": [ |
| { |
| "first": "J", |
| "middle": [], |
| "last": "Nivre", |
| "suffix": "" |
| } |
| ], |
| "year": 2003, |
| "venue": "Proceedings of IWPT2003", |
| "volume": "", |
| "issue": "", |
| "pages": "149--160", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "J. Nivre. 2003. An efficient algorithm for projective dependency parsing. In Proceedings of IWPT2003, pages 149-160.", |
| "links": null |
| }, |
| "BIBREF18": { |
| "ref_id": "b18", |
| "title": "Bilingual parsing with factored estimation: Using English to parse Korean", |
| "authors": [ |
| { |
| "first": "David", |
| "middle": [ |
| "A" |
| ], |
| "last": "Smith", |
| "suffix": "" |
| }, |
| { |
| "first": "Noah", |
| "middle": [ |
| "A" |
| ], |
| "last": "Smith", |
| "suffix": "" |
| } |
| ], |
| "year": 2004, |
| "venue": "Proceedings of EMNLP", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "David A. Smith and Noah A. Smith. 2004. Bilingual parsing with factored estimation: Using English to parse Korean. In Proceedings of EMNLP.", |
| "links": null |
| }, |
| "BIBREF19": { |
| "ref_id": "b19", |
| "title": "Building a large-scale annotated Chinese corpus", |
| "authors": [ |
| { |
| "first": "Nianwen", |
| "middle": [], |
| "last": "Xue", |
| "suffix": "" |
| }, |
| { |
| "first": "Fu-Dong", |
| "middle": [], |
| "last": "Chiou", |
| "suffix": "" |
| }, |
| { |
| "first": "Martha", |
| "middle": [], |
| "last": "Palmer", |
| "suffix": "" |
| } |
| ], |
| "year": 2002, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Nianwen Xue, Fu-Dong Chiou, and Martha Palmer. 2002. Building a large-scale annotated Chinese cor- pus. In Coling.", |
| "links": null |
| }, |
| "BIBREF20": { |
| "ref_id": "b20", |
| "title": "Statistical dependency analysis with support vector machines", |
| "authors": [ |
| { |
| "first": "H", |
| "middle": [], |
| "last": "Yamada", |
| "suffix": "" |
| }, |
| { |
| "first": "Y", |
| "middle": [], |
| "last": "Matsumoto", |
| "suffix": "" |
| } |
| ], |
| "year": 2003, |
| "venue": "Proceedings of IWPT2003", |
| "volume": "", |
| "issue": "", |
| "pages": "195--206", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "H. Yamada and Y. Matsumoto. 2003. Statistical de- pendency analysis with support vector machines. In Proceedings of IWPT2003, pages 195-206.", |
| "links": null |
| }, |
| "BIBREF21": { |
| "ref_id": "b21", |
| "title": "Cross language dependency parsing using a bilingual lexicon", |
| "authors": [ |
| { |
| "first": "Hai", |
| "middle": [], |
| "last": "Zhao", |
| "suffix": "" |
| }, |
| { |
| "first": "Yan", |
| "middle": [], |
| "last": "Song", |
| "suffix": "" |
| }, |
| { |
| "first": "Chunyu", |
| "middle": [], |
| "last": "Kit", |
| "suffix": "" |
| }, |
| { |
| "first": "Guodong", |
| "middle": [], |
| "last": "Zhou", |
| "suffix": "" |
| } |
| ], |
| "year": 2009, |
| "venue": "Proceedings of ACL-IJCNLP2009", |
| "volume": "", |
| "issue": "", |
| "pages": "55--63", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Hai Zhao, Yan Song, Chunyu Kit, and Guodong Zhou. 2009. Cross language dependency parsing us- ing a bilingual lexicon. In Proceedings of ACL- IJCNLP2009, pages 55-63, Suntec, Singapore, Au- gust. Association for Computational Linguistics.", |
| "links": null |
| } |
| }, |
| "ref_entries": { |
| "FIGREF0": { |
| "text": "Example for a searched subtree", |
| "type_str": "figure", |
| "num": null, |
| "uris": null |
| }, |
| "FIGREF1": { |
| "text": "0-NULL:2:1-meat:3:1 the:1:3-NULL:2:3-meat:3:0 a:1:3-NULL:2:3-fork:3:0", |
| "type_str": "figure", |
| "num": null, |
| "uris": null |
| }, |
| "FIGREF2": { |
| "text": "Examples of trigram-subtrees", |
| "type_str": "figure", |
| "num": null, |
| "uris": null |
| }, |
| "FIGREF3": { |
| "text": "Example for prepositional phrases modifying a verb 2) Relative clauses precede head noun.", |
| "type_str": "figure", |
| "num": null, |
| "uris": null |
| }, |
| "FIGREF4": { |
| "text": "Example for relative clauses preceding the head noun", |
| "type_str": "figure", |
| "num": null, |
| "uris": null |
| }, |
| "FIGREF5": { |
| "text": "They are on the fringes of society .", |
| "type_str": "figure", |
| "num": null, |
| "uris": null |
| }, |
| "FIGREF6": { |
| "text": "Example of auto-parsed bilingual sentence pair", |
| "type_str": "figure", |
| "num": null, |
| "uris": null |
| }, |
| "FIGREF7": { |
| "text": "Example of features for parsing", |
| "type_str": "figure", |
| "num": null, |
| "uris": null |
| }, |
| "TABREF0": { |
| "type_str": "table", |
| "text": "Top five mapping rules of 2to3 and 3to2 example, in", |
| "content": "<table/>", |
| "html": null, |
| "num": null |
| }, |
| "TABREF2": { |
| "type_str": "table", |
| "text": "Dependency parsing results of Chinesesource case", |
| "content": "<table/>", |
| "html": null, |
| "num": null |
| }, |
| "TABREF4": { |
| "type_str": "table", |
| "text": "", |
| "content": "<table><tr><td>: Dependency parsing results of English-</td></tr><tr><td>source case</td></tr><tr><td>5.2 Comparative results</td></tr></table>", |
| "html": null, |
| "num": null |
| }, |
| "TABREF6": { |
| "type_str": "table", |
| "text": "Comparative results", |
| "content": "<table/>", |
| "html": null, |
| "num": null |
| } |
| } |
| } |
| } |