| { |
| "paper_id": "2020", |
| "header": { |
| "generated_with": "S2ORC 1.0.0", |
| "date_generated": "2023-01-19T11:46:17.305637Z" |
| }, |
| "title": "Shallow Discourse Parsing for Under-Resourced Languages: Combining Machine Translation and Annotation Projection", |
| "authors": [ |
| { |
| "first": "Henny", |
| "middle": [], |
| "last": "Sluyter-G\u00e4thje", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "Applied Computational Linguistics Potsdam", |
| "location": { |
| "country": "Germany" |
| } |
| }, |
| "email": "" |
| }, |
| { |
| "first": "Peter", |
| "middle": [], |
| "last": "Bourgonje", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "Applied Computational Linguistics Potsdam", |
| "location": { |
| "country": "Germany" |
| } |
| }, |
| "email": "" |
| }, |
| { |
| "first": "Manfred", |
| "middle": [], |
| "last": "Stede", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "Applied Computational Linguistics Potsdam", |
| "location": { |
| "country": "Germany" |
| } |
| }, |
| "email": "" |
| } |
| ], |
| "year": "", |
| "venue": null, |
| "identifiers": {}, |
| "abstract": "Shallow Discourse Parsing (SDP), the identification of coherence relations between text spans, relies on large amounts of training data, which so far exists only for English-any other language is in this respect an under-resourced one. For those languages where machine translation from English is available with reasonable quality, MT in conjunction with annotation projection can be an option for producing an SDP resource. In our study, we translate the English Penn Discourse TreeBank into German and experiment with various methods of annotation projection to arrive at the German counterpart of the PDTB. We describe the key characteristics of the corpus as well as some typical sources of errors encountered during its creation. Then we evaluate the GermanPDTB by training components for selected sub-tasks of discourse parsing on this silver data and compare performance to the same components when trained on the gold, original PDTB corpus.", |
| "pdf_parse": { |
| "paper_id": "2020", |
| "_pdf_hash": "", |
| "abstract": [ |
| { |
| "text": "Shallow Discourse Parsing (SDP), the identification of coherence relations between text spans, relies on large amounts of training data, which so far exists only for English-any other language is in this respect an under-resourced one. For those languages where machine translation from English is available with reasonable quality, MT in conjunction with annotation projection can be an option for producing an SDP resource. In our study, we translate the English Penn Discourse TreeBank into German and experiment with various methods of annotation projection to arrive at the German counterpart of the PDTB. We describe the key characteristics of the corpus as well as some typical sources of errors encountered during its creation. Then we evaluate the GermanPDTB by training components for selected sub-tasks of discourse parsing on this silver data and compare performance to the same components when trained on the gold, original PDTB corpus.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Abstract", |
| "sec_num": null |
| } |
| ], |
| "body_text": [ |
| { |
| "text": "Texts are not a random collection of sentences: they are texts because they convey a certain sense of coherence. The uncovering of the coherence relations holding a text together is referred to as the task of discourse parsing. Like many other tasks based on automatically parsing input text in the larger field of Natural Language Processing, procedures often rely on the availability of training data annotated for the type of information to be extracted. In the case of coherence relations, such annotations are notoriously difficult and time-consuming to obtain, and inter-annotator agreement rates are lower than for many other tasks. As a result, the amount of available training data is comparatively small, especially for languages other than English (see Section 2.). In this paper, we present a corpus annotated for discourse relations obtained through automatically translating an existing English corpus (the Penn Discourse TreeBank, henceforth: PDTB, (Prasad et al., 2008) ), and using word alignment to project the English annotations on the German target text. The result is the GermanPDTB, a German corpus annotated for shallow discourse relations in the (financial) news domain. We provide details on the method used to create this corpus, sum up the key characteristics and use the GermanPDTB to enrich a pre-existing German connective lexicon. In addition, we provide an extrinsic evaluation of the corpus using components of a German discourse parser and compare performance of selected (sub-)tasks on GermanPDTB to the original English PDTB. The rest of this paper is structured as follows: Section 2. lists similar corpora for German and other languages. Section 3. briefly describes the different coherence annotation types in the original PDTB and consequently the German-PDTB. Section 4. explains our method of constructing the GermanPDTB and Section 5. explains the manual corrections done on this automatically produced output. Section 6.1. 
provides an intrinsic evaluation of the German Machine Translation output and the quality and possible sources of error for the annotation projection, and Section 6.2. provides the extrinsic evaluation, using the German-PDTB as training data for a discourse parser. Finally, Section 7. sums up our main findings and points to future work.", |
| "cite_spans": [ |
| { |
| "start": 964, |
| "end": 985, |
| "text": "(Prasad et al., 2008)", |
| "ref_id": "BIBREF18" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1." |
| }, |
| { |
| "text": "Our starting point for the GermanPDTB is the original English PDTB in its 2.0 version (Prasad et al., 2008) . More specifically, we use the subset also used in the 2016 CoNLL shared task on discourse parsing (Xue et al., 2016) . The PDTB is by far the largest corpus annotated for coherence relations, with over 1m words and over 40k annotated relations in its 2.0 version. Other corpora annotated for coherence relations are considerably smaller, and also distributed over different frameworks, most notably Rhetorical Structure Theory (Mann and Thompson, 1988) and Segmented Discourse Representation Theory (Asher and Lascarides, 2005) . We refer the reader to Zeldes et al. (2019) for an overview of corpora for different languages and frameworks. For German, our language of interest, to date the largest annotated corpus is the Potsdam Commentary Corpus (henceforth: PCC, (Bourgonje and Stede, 2020) ) and a smaller corpus exists as a discourse annotation layer over parts of the T\u00dcBA-D/Z corpus (Versley and Gastel, 2012) . The PCC contains 2,208 relations, annotated according to the guidelines used for the PDTB2 (Prasad et al., 2008) . Because of the much larger size of the PDTB, in our experiments we hope to collect considerably more instances of discourse relations in German. Through our method, exploiting machine translation and annotation projection, we will extract silver data in the sense that the resulting annotations cannot be guaranteed to be correct (i.e., they are not all checked by a human, though the next sections describe heuristics for quality assurance), but because of the much larger size of the PDTB, we end up with many more instances of relations in German, and from a slightly different domain, with the PCC representing the news editorial/commentary domain, and PDTB articles representing the financial news domain. 
The procedure of annotation projection has been used in the context of coherence relations before, but remained restricted to explicit discourse connectives, for example to create or extend discourse lexicons and disambiguate connectives (English-French (Laali and Kosseim, 2014) , English-Chinese (Zhou et al., 2012) and German-Italian (Bourgonje et al., 2017) ), to compile a metric to score machine translation output (English-Arabic (Hajlaoui and Popescu-Belis, 2013) ) or to create a corpus annotated with discourse markers to train a parser (English-German (Versley, 2010) and English-French (Laali, 2017) ). The novelty of our work lies in using the procedure for entire coherence relations, as opposed to restricting it to connectives. In contrast to Versley (2010) and Laali and Kosseim (2014) who use existing parallel corpora for which they automatically annotated the English side, we create a parallel corpus by machine-translating the manually annotated PDTB. After machine translation, we rely on word alignments produced with GIZA++ (Och and Ney, 2003) that are post-processed using some heuristics implemented in the Moses statistical machine translation system (Koehn et al., 2007) . In addition, we perform an automatic corpus analysis to examine how different types of annotation interdepend, and accordingly we compile rules for the projection process.", |
| "cite_spans": [ |
| { |
| "start": 86, |
| "end": 107, |
| "text": "(Prasad et al., 2008)", |
| "ref_id": "BIBREF18" |
| }, |
| { |
| "start": 208, |
| "end": 226, |
| "text": "(Xue et al., 2016)", |
| "ref_id": null |
| }, |
| { |
| "start": 537, |
| "end": 562, |
| "text": "(Mann and Thompson, 1988)", |
| "ref_id": "BIBREF10" |
| }, |
| { |
| "start": 609, |
| "end": 637, |
| "text": "(Asher and Lascarides, 2005)", |
| "ref_id": "BIBREF0" |
| }, |
| { |
| "start": 663, |
| "end": 683, |
| "text": "Zeldes et al. (2019)", |
| "ref_id": "BIBREF26" |
| }, |
| { |
| "start": 877, |
| "end": 904, |
| "text": "(Bourgonje and Stede, 2020)", |
| "ref_id": "BIBREF3" |
| }, |
| { |
| "start": 1001, |
| "end": 1027, |
| "text": "(Versley and Gastel, 2012)", |
| "ref_id": "BIBREF22" |
| }, |
| { |
| "start": 1121, |
| "end": 1142, |
| "text": "(Prasad et al., 2008)", |
| "ref_id": "BIBREF18" |
| }, |
| { |
| "start": 2110, |
| "end": 2135, |
| "text": "(Laali and Kosseim, 2014)", |
| "ref_id": "BIBREF7" |
| }, |
| { |
| "start": 2154, |
| "end": 2173, |
| "text": "(Zhou et al., 2012)", |
| "ref_id": "BIBREF27" |
| }, |
| { |
| "start": 2193, |
| "end": 2217, |
| "text": "(Bourgonje et al., 2017)", |
| "ref_id": "BIBREF4" |
| }, |
| { |
| "start": 2293, |
| "end": 2327, |
| "text": "(Hajlaoui and Popescu-Belis, 2013)", |
| "ref_id": "BIBREF5" |
| }, |
| { |
| "start": 2403, |
| "end": 2434, |
| "text": "(English-German (Versley, 2010)", |
| "ref_id": null |
| }, |
| { |
| "start": 2454, |
| "end": 2467, |
| "text": "(Laali, 2017)", |
| "ref_id": "BIBREF8" |
| }, |
| { |
| "start": 2615, |
| "end": 2629, |
| "text": "Versley (2010)", |
| "ref_id": "BIBREF23" |
| }, |
| { |
| "start": 2634, |
| "end": 2658, |
| "text": "Laali and Kosseim (2014)", |
| "ref_id": "BIBREF7" |
| }, |
| { |
| "start": 2905, |
| "end": 2924, |
| "text": "(Och and Ney, 2003)", |
| "ref_id": "BIBREF13" |
| }, |
| { |
| "start": 3035, |
| "end": 3055, |
| "text": "(Koehn et al., 2007)", |
| "ref_id": "BIBREF6" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Related Work", |
| "sec_num": "2." |
| }, |
| { |
| "text": "In the PDTB framework, coherence annotations are divided into five different relation types.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Annotation Structure", |
| "sec_num": "3." |
| }, |
| { |
| "text": "(1) Explicit relations consist of an overtly realised discourse connective (such as because, although, if ) and two arguments; one external argument (Arg1) and one internal argument (Arg2), the latter being syntactically integrated with the discourse connective. Finally, they contain a relation sense, to be selected from the PDTB sense hierarchy (see Prasad et al. (2008) ). Arg1 and Arg2 are referred to as such because this reflects the unmarked order of the arguments, but the reverse order can occur as well. Explicit relations make up~43% of all relations (see also Table 3 ).", |
| "cite_spans": [ |
| { |
| "start": 353, |
| "end": 373, |
| "text": "Prasad et al. (2008)", |
| "ref_id": "BIBREF18" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 573, |
| "end": 580, |
| "text": "Table 3", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Annotation Structure", |
| "sec_num": "3." |
| }, |
| { |
| "text": "(2) Implicit relations consist of two arguments (Arg1 and Arg2) only, because an overtly realised connective was considered redundant by the author; in a sequence like \"Mary broke her leg. She could not attend the festival the next day.\", a causal relation can easily be inferred without an explicit connective. Corresponding to the PDTB annotation guidelines, for implicit relations the annotators specified the connective that could be inserted between the two arguments (but crucially is not present in the text). Finally, they equally contain a relation sense. Implicit relations make up 38% of all relations. (3-5) If between two adjacent segments (typically sentences), neither an explicit nor an implicit relation could be assigned, the annotator furthermore had the option to choose between the remaining three types of entity relation (EntRel), alternative lexicalisation (AltLex) or no relation (NoRel). EntRel cases (~12% of all relations) are those where no particular relation from the PDTB sense hierarchy could be assigned, but the two segments speak of the same entities. As such, they only contain two ar-guments (and no -explicit or implicit -connective). Al-tLex cases (~2% of all relations) are those where the relation was explicitly expressed through something other than a discourse connective. Discourse connectives are seen as a closed class (though different theories and frameworks disagree on specifics), and a typical alternative lexicalisation would be At that time, expressing a Temporal.Synchronous relation sense. Finally, NoRel cases (~0.6% of all relations) are those where no relation between two adjacent segments could be established by the annotator. We adopt this scheme and attempt to project any relation (except NoRel) found in the PDTB onto the GermanPDTB.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Annotation Structure", |
| "sec_num": "3." |
| }, |
| { |
| "text": "The creation of the GermanPDTB can be decomposed into several steps, explained in more detail in the following subsections. First, we need to create a parallel, sentencealigned corpus, comprising the raw, English text of the original PDTB on the one hand, and the raw, German text of the GermanPDTB in-the-making on the other hand. Second, we extract word alignments from the parallel sentences. Third, we establish a set of heuristics based on the different annotation types present in the PDTB. In the process, we extend an already existing German lexicon of connectives: DiMLex (Stede, 2002) . First introduced in 1998, this lexicon has been extended and refined over the last 20 years, resulting in a relatively exhaustive and stable lexicon of German discourse connectives. Still, in the process of creating the GermanPDTB, we found several items we consider candidate entries for the lexicon.", |
| "cite_spans": [ |
| { |
| "start": 581, |
| "end": 594, |
| "text": "(Stede, 2002)", |
| "ref_id": "BIBREF20" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Method", |
| "sec_num": "4." |
| }, |
| { |
| "text": "We use machine translation to produce a parallel corpus. We considered and tested five different systems -Google Translate 1 , DeepL 2 , Bing 3 , Edinburgh's Neural Machine Translation system (Sennrich et al., 2016) and Moses (Koehn et al., 2007) -by translating the English side of a parallel news corpus (Tiedemann, 2012) and scoring the translation using BLEU (Papineni et al., 2002) . Google Translate and DeepL produced the best translations with BLEU scores of 26.6 and 28.07 respectively, so we proceeded with these two systems and translated the English raw text of the PDTB. Though the BLEU scores are not particularly good, we determined by manual inspection that the translations can generally be considered good enough for creating the corpus. Next, we performed a separate manual evaluation on a subset of 50 sentences, following the approach proposed by Popovic et al. (2013) . Since the translations for these 50 sentences were of equal quality for both systems, we determined for which one a direct alignment (German-English) retrieved more explicit connectives. As this was the case for the DeepL translation, we continued to work with this system.", |
| "cite_spans": [ |
| { |
| "start": 192, |
| "end": 215, |
| "text": "(Sennrich et al., 2016)", |
| "ref_id": "BIBREF19" |
| }, |
| { |
| "start": 226, |
| "end": 246, |
| "text": "(Koehn et al., 2007)", |
| "ref_id": "BIBREF6" |
| }, |
| { |
| "start": 306, |
| "end": 323, |
| "text": "(Tiedemann, 2012)", |
| "ref_id": "BIBREF21" |
| }, |
| { |
| "start": 363, |
| "end": 386, |
| "text": "(Papineni et al., 2002)", |
| "ref_id": "BIBREF15" |
| }, |
| { |
| "start": 868, |
| "end": 889, |
| "text": "Popovic et al. (2013)", |
| "ref_id": "BIBREF16" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Creation of the Parallel Corpus", |
| "sec_num": "4.1." |
| }, |
| { |
| "text": "Having obtained parallel English-German sentences, we proceeded with extracting word alignment using GIZA++.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Alignment Heuristics", |
| "sec_num": "4.2." |
| }, |
| { |
| "text": "First experiments with direct alignments were not promising and we encountered similar issues as those reported by Laali (2017) . We therefore applied additional alignment heuristics implemented in Moses (in which GIZA++ is executed using IBM Model 4), similar to Laali (2017) who used the intersection and the grow-diag, and to Versley (2010) who used the intersection and the grow-diag-final heuristics. We experimented with six alignment versions in total. To evaluate these, we extracted the aligned German connective candidates and matched them against DiMLex to see how many are found. All six versions build on the direct (English to German) and the inverse alignment (German to English). The intersection only contains alignment points that appear in the direct and the inverse alignment, while the union contains all alignment points from both alignments. The four remaining heuristics augment the intersection with alignment points from the union in various ways, as proposed by Och and Ney (2003) . In the grow heuristic, word pairs neighbouring already aligned word pairs are aligned if they occur in the union. The grow-diag heuristic extends the notion of neighbouring words and is therefore less restrictive. In the growdiag-final heuristic, a final step is added in which remaining word pairs get aligned if one word is not yet aligned and the word pair is aligned in the union. The grow-diagfinal-and implements a more restrictive final step in which a word pair is only checked against the union if both words are not yet aligned. In short, the grow method is the most restrictive, followed by the grow-diag, the grow-diag-finaland and the grow-diag-final method. The more restrictive a heuristic is, the more precise it is, but the fewer connectives are found in total. The results for the six different methods are presented in Table 1 . 
We decided to favour precision over recall, and as the intersection version is the most precise, we use this heuristic as default alignment for projecting the connectives. For projecting the arguments, all alignment versions are used and a majority vote is retrieved. The same applies if in the intersection version a discourse marker is aligned to \"NULL\" or if the aligned word is not found in DiMLex.", |
| "cite_spans": [ |
| { |
| "start": 115, |
| "end": 127, |
| "text": "Laali (2017)", |
| "ref_id": "BIBREF8" |
| }, |
| { |
| "start": 989, |
| "end": 1007, |
| "text": "Och and Ney (2003)", |
| "ref_id": "BIBREF13" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 1848, |
| "end": 1855, |
| "text": "Table 1", |
| "ref_id": "TABREF0" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Alignment Heuristics", |
| "sec_num": "4.2." |
| }, |
| { |
| "text": "Using our heuristic of choice as described above, we manually analysed a subset of the projected (German) explicit connectives that were not found in DiMLex, allowing us to find sources of error in alignment/projection. The majority of cases that emerged from this manually-analysed subset evolve around modifiers for discourse markers. In the PDTB, modifiers (for example temporal modifications and focus particles as in shortly thereafter and especially if ) are annotated as part of the explicit connective. In this respect, DiMLex has a more strict definition and includes the head of the explicit connective only, while regarding the modifier as an optional element (some of those are focus particles whose combination with connectives is restricted, which is also recorded in the DiMLex entries). To be able to reliably evaluate the explicit connectives we only annotate the \"pure\" form in the GermanPDTB, i.e., we iterate over all (German) words that are aligned to the (English) explicit connective and only annotate the ones matching an entry in DiMLex. After this step, some explicit connectives were found to be correctly aligned yet not present in DiM-Lex. For such cases, inspired by Meyer and Webber (2013), we extracted all explicit discourse markers from the PDTB, translated them with DeepL, checked them against DiMLex and discussed the ones not yet present. This resulted in 17 candidates that can be considered as additions to DiMLex.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Extension of the Lexicon", |
| "sec_num": "4.3." |
| }, |
| { |
| "text": "We project the annotations from the English to the German side of the parallel corpus sentence-wise and relation-wise (several discourse relations can be annotated for one sentence). To not rely on the word alignments alone, we conducted an automatic analysis of the PDTB and compiled rules for the projection of the arguments and for the projection of the relations that are not explicit. For example, if an argument spans a whole sentence, the projection is possibly based on sentence alignment alone, and no word alignments are needed. Since the position of the arguments depends on the position of the connective, we start the projection by checking if a connective is present in the English sentence. If this is the case, we retrieve the alignment and check the word(s) to which the English connective is(are) aligned against DiMLex. If the result set is empty, we retrieve alternative alignments (see Section 4.2.) and look up these alignments. If this results in a non-empty set, we proceed with this instead. If none of the alternative alignment procedures resulted in a German word or phrase present in DiMLex, we extract all n-grams in a window with a size of five tokens around each connective and check if any is present in DiMLex with a matching relation sense. If this resulted in an empty result set too, we manually annotate the connective.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Projection", |
| "sec_num": "4.4." |
| }, |
| { |
| "text": "For implicit and AltLex relations, we want to assign the relation sense to the appropriate word or phrase (the alternative lexicalisation in the AltLex case, typically the first word of Arg2 in the implicit case), but in this case we cannot check the alignments against DiMLex. So for these relation types, we check whether the tags can be transferred using a rule (e.g. is annotated to the first word of a sentence); if they cannot, we retrieve a majority vote on the tag's position with all six alignment versions. EntRel relations are always annotated to the first word of the second argument, therefore the projection is included in the argument projection process.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Projection", |
| "sec_num": "4.4." |
| }, |
| { |
| "text": "The arguments, for all relation types, are projected in the following way; if the argument is continuous, a majority vote is compiled for the start and the end of the argument. Otherwise, we split the argument into continuous parts and retrieve the majority vote for the start and the end of each part.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Projection", |
| "sec_num": "4.4." |
| }, |
| { |
| "text": "Furthermore, to correctly annotate the implicit relations we created an English-German mapping for the connectives to be inserted. We further PoS-tagged the German raw text using MarMoT (M\u00fcller et al., 2013) to be able to present the GermanPDTB enriched with the same information as the PDTB. Table 2 : Most frequent explicit connectives that were annotated as implicit relations.", |
| "cite_spans": [ |
| { |
| "start": 186, |
| "end": 207, |
| "text": "(M\u00fcller et al., 2013)", |
| "ref_id": "BIBREF12" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 293, |
| "end": 300, |
| "text": "Table 2", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Projection", |
| "sec_num": "4.4." |
| }, |
| { |
| "text": "After going through the procedures outlined above, 2.7% of the explicit connectives needed to be manually corrected, mainly due to four reasons.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Manual Correction", |
| "sec_num": "5." |
| }, |
| { |
| "text": "1. The connective was correctly annotated but is not in DiMLex. These cases were discussed and four of them were considered candidates for DiMLex, resulting in 21 candidate entries for DiMLex, in total.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Manual Correction", |
| "sec_num": "5." |
| }, |
| { |
| "text": "The connective was present in the sentence, but not found through alignment or by looking for it around the projected position. This was mostly due to the window size of five being too narrow in some cases (further increasing the window size however led to the inclusion of too much noise).", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "2.", |
| "sec_num": null |
| }, |
| { |
| "text": "3. The connective was not present in the translation. Explicit relations in some language A can tend to be expressed more often implicitly in language B. This zero-translation case is a known problem in the literature (Meyer and Webber, 2013) . 4 . No discourse relation is expressed in the translation mostly because a part of the translation is missing. Table 2 displays the most frequent explicit connectives that were expressed through an implicit relation in German. In total, 164 explicit relations turned implicit. if was often not translated, mostly when it was at the beginning of a sentence, and however was mostly omitted when inserted within the sentence. Examples of both cases are provided below.", |
| "cite_spans": [ |
| { |
| "start": 218, |
| "end": 246, |
| "text": "(Meyer and Webber, 2013) . 4", |
| "ref_id": null |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 356, |
| "end": 363, |
| "text": "Table 2", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "2.", |
| "sec_num": null |
| }, |
| { |
| "text": "EN If, by that time, the network reaches 14 million homes, the contract will be renewed for five more years.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "2.", |
| "sec_num": null |
| }, |
| { |
| "text": "DE Erreicht das Netz bis zu diesem Zeitpunkt 14 Millionen Haushalte, wird der Vertrag um weitere f\u00fcnf Jahre verl\u00e4ngert.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "2.", |
| "sec_num": null |
| }, |
| { |
| "text": "EN Few small neighborhood businesses, however, can afford such protection, even in collaboration with other local merchants.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "2.", |
| "sec_num": null |
| }, |
| { |
| "text": "DE Nur wenige kleine Unternehmen in der Nachbarschaft k\u00f6nnen sich einen solchen Schutz leisten, auch in Zusammenarbeit mit anderen lokalen H\u00e4ndlern.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "2.", |
| "sec_num": null |
| }, |
| { |
| "text": "An example of an incorrect alignment due to translation error is shown below, where the phrase but that won't be enough is missing in the German target text.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "2.", |
| "sec_num": null |
| }, |
| { |
| "text": "EN Mr. Koch already has announced he will drop 3,200 jobs from the city payroll, but that won't be enough.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "2.", |
| "sec_num": null |
| }, |
| { |
| "text": "DE Koch hat bereits angek\u00fcndigt, dass er 3.200 Stellen von der Lohnliste der Stadt streichen wird.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "2.", |
| "sec_num": null |
| }, |
| { |
| "text": "We provide two types of evaluation. The intrinsic evaluation (first subsection) focuses on the output of the translation and projection procedures, and discusses frequent sources of errors. The extrinsic evaluation (second subsection) describes experiments using the obtained silver data for discourse parsing, i.e., it evaluates the quality of the output with regard to usability as training data for the parsing task.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Evaluation", |
| "sec_num": "6." |
| }, |
| { |
| "text": "Some key characteristics of the original PDTB and the Ger-manPDTB are summarised in Table 3 . Due to the manual correction of the explicit connectives, there are fewer discourse relations in total in the GermanPDTB. There are also fewer explicit but more implicit relations; the number of AltLex and EntRel relations stays the same. The PDTB has more unique discourse markers than the Ger-manPDTB. This is most likely due to modifiers, which in the PDTB are part of the connective, but not so in DiMLex (see Section 4.3.). For comparison, we also extracted the heads of the connective in a naive way by only considering the last word. This method fails for some connectives, e.g., for on the contrary. Since the GermanPDTB contains more unique \"naive\" heads than the PDTB, this compensates for the difference in ambiguity of discourse connectives. In other words, when considering the full connective, German seems to be more ambiguous, because it expresses roughly the same number of relations/senses with fewer unique connectives. However, when looking at the head, the situation is reversed. We have not quantified in how many cases our naive way of extracting the head results in wrong heads being extracted, though. Total relations 39,319 39,311 Explicit relations 16,888 16,670 Implicit relations 15, Table 3 : Key characteristics of original PDTB and GermanPDTB.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 84, |
| "end": 91, |
| "text": "Table 3", |
| "ref_id": null |
| }, |
| { |
| "start": 1221, |
| "end": 1313, |
| "text": "Total relations 39,319 39,311 Explicit relations 16,888 16,670 Implicit relations 15,", |
| "ref_id": "TABREF0" |
| }, |
| { |
| "start": 1314, |
| "end": 1321, |
| "text": "Table 3", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Intrinsic Evaluation", |
| "sec_num": "6.1." |
| }, |
| { |
| "text": "To further evaluate our corpus we manually examined 150 discourse relations. Based on this, we distinguish between four kinds of errors:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "PDTB GermanPDTB", |
| "sec_num": null |
| }, |
| { |
| "text": "1. Punctuation errors: A punctuation mark is not included in the annotation even though it is included in the PDTB relation, or vice versa.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "PDTB GermanPDTB", |
| "sec_num": null |
| }, |
| { |
| "text": "2. Minor word errors: One word is not included in the annotation even though it is included in the PDTB relation, or vice versa.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "PDTB GermanPDTB", |
| "sec_num": null |
| }, |
| { |
| "text": "3. Severe word errors: More than one word is not included in the annotation even though it is included in the PDTB relation, or vice versa.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "PDTB GermanPDTB", |
| "sec_num": null |
| }, |
| { |
| "text": "The connective is wrongly annotated.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Connective errors:", |
| "sec_num": "4." |
| }, |
| { |
| "text": "In the manually examined set, 141 out of the 150 relations were accurate (94%). In three cases (2%) there were punctuation errors; furthermore there were four minor (3%) and two severe (1%) word errors. Only severe word errors render an annotation useless, so based on this manual, intrinsic evaluation, we can conclude that 99% of cases are usable for our purposes. With the set of relations under investigation being very small though (150 relations, which is <1% of all relations), a larger sample size would provide a more reliable perspective.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Connective errors:", |
| "sec_num": "4." |
| }, |
| { |
| "text": "Having established the quality of the annotations by looking at the relations themselves, we now turn to a more usecase driven evaluation. Specifically, we use individual components of a German discourse parser currently under development to assess the suitability of the obtained data for the tasks of connective disambiguation and argument extraction. To put performance into perspective, we compare performance of these components to their performance on the original, English PDTB data. All scores reported on are the result of 10-fold cross-validation.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Extrinsic Evaluation", |
| "sec_num": "6.2." |
| }, |
| { |
| "text": "First, we establish the quality of the projected data with regard to the task of connective disambiguation. To exemplify the task, consider the sentences in (1) and (2).", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Connective Disambiguation", |
| "sec_num": "6.2.1." |
| }, |
| { |
| "text": "(1) A small but significant effect.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Connective Disambiguation", |
| "sec_num": "6.2.1." |
| }, |
| { |
| "text": "(2) Lucy had very little contact with the folks outside her cubicle day, but she found it suitable and she liked it that way.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Connective Disambiguation", |
| "sec_num": "6.2.1." |
| }, |
| { |
| "text": "While the but in (1) simply coordinates two noun phrases (with the noun elided in the first NP), the but in (2) indicates a relation between two propositions, and puts them in a contrastive relation. Our task entails binary classification, classifying candidates as having either sentential (as in (1)) or discourse (as in (2)) reading. Using the classifier described in (Bourgonje and Stede, 2018) , on the German-PDTB data, we get a binary f1-score of 94.04. When using the same classifier on the English PDTB, Bourgonje and Stede (2018) report a very similar binary f1-score of 93.64. Comparing this, in turn, to the English competition, we note that the overall winning system of the 2016 CoNLL shared task on discourse parsing (Oepen et al., 2016) reports an f1score of 91.79 for the sub-task of connective disambiguation. The system with the highest score for this sub-task in that same competition, however, achieved an f1-score of 98.38 (Li et al., 2016) . We suspect the difference in performance to be due to language-specifics, similar to those reported in Section 5., where German in some cases tends to implicit realisation, whereas English uses an explicit form. Further investigation would be needed to find the root cause of the 0.4 point difference in f1-score, but we consider the fact that scores are relatively close together a confirmation of generally good quality which we observed from manual evaluation in Section 6.1.", |
| "cite_spans": [ |
| { |
| "start": 371, |
| "end": 398, |
| "text": "(Bourgonje and Stede, 2018)", |
| "ref_id": "BIBREF1" |
| }, |
| { |
| "start": 732, |
| "end": 752, |
| "text": "(Oepen et al., 2016)", |
| "ref_id": "BIBREF14" |
| }, |
| { |
| "start": 945, |
| "end": 962, |
| "text": "(Li et al., 2016)", |
| "ref_id": "BIBREF9" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Connective Disambiguation", |
| "sec_num": "6.2.1." |
| }, |
| { |
| "text": "The second component on which we evaluate the German-PDTB is argument extraction. In the PDTB framework, each coherence relation has two arguments which are put in some kind of relation to each other. Consider the sentence in (3).", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Argument Extraction", |
| "sec_num": "6.2.2." |
| }, |
| { |
| "text": "(3) powerful political pressures may convince the Conservative government to keep its so-called golden share, which limits any individual holding to 15%, until the restriction expires on Dec. 31, 1990 (from the PDTB2.0: WSJ 0745)", |
| "cite_spans": [ |
| { |
| "start": 187, |
| "end": 200, |
| "text": "Dec. 31, 1990", |
| "ref_id": null |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Argument Extraction", |
| "sec_num": "6.2.2." |
| }, |
| { |
| "text": "The first argument is in italics and the second argument in bold face. The task of argument extraction is to decide upon the scope of both arguments and to extract (in the optimal case) the exact token span that makes up the argument. As our discourse parser for German only works for explicit relations so far, the scores reported here for argument extraction are based on the 16,670 explicit relations in the Ger-manPDTB only. We use the approach described in (Bourgonje and Stede, 2019) and we follow their evaluation metric, which for every argument measures the token overlap between the actual and the predicted argument in the sense that every token that truly belongs to the argument and is classified as such results in a true positive; every token that does not belong to the argument and is classified as such results in a false positive; and every token that belongs to the argument and is not classified as such results in a false negative. Scores are averaged over 10 cross-validation runs, and use the connective annotation from the GermanPDTB directly, instead of using the classifier to predict the presence of connectives. When using the classifiers in combination with heuristics, we get an f1-score of 62.45 for Arg1 spans and 81.33 for Arg2 spans. The corresponding numbers for English reported by Bourgonje and Stede (2019) are 59.35 and 88.63, meaning that interestingly, Arg1 spans are easier to detect in the GermanPDTB, while Arg2 spans are more difficult to detect, compared to the original PDTB. We refer the reader to (Bourgonje and Stede, 2019) , Section 5 for more details on how this compares to other competitors. Upon manual investigation, we found that for both argument types (Arg1 and Arg2), attribution was a frequent source of error. The heuristics described in (Bourgonje and Stede, 2019) were devised based on the PCC, which consists of news commentary and contains very few cases of attribution. \nThe PDTB contains such cases much more frequently (Prasad et al., 2006) , and the token span expressing the attribution is typically left out of the annotated argument, but is included by the heuristics. This is supported by the lower precision and higher recall for both Arg1 spans and Arg2 spans (59.08 (precision), 66.25 (recall) and 78.79 (precision), 84.04 (recall), respectively). This, however, impacts both the German and English processing, and does not explain the difference in performance between the two. We leave further investigation into the cause for this difference to future work.", |
| "cite_spans": [ |
| { |
| "start": 1319, |
| "end": 1345, |
| "text": "Bourgonje and Stede (2019)", |
| "ref_id": "BIBREF2" |
| }, |
| { |
| "start": 1547, |
| "end": 1574, |
| "text": "(Bourgonje and Stede, 2019)", |
| "ref_id": "BIBREF2" |
| }, |
| { |
| "start": 1801, |
| "end": 1828, |
| "text": "(Bourgonje and Stede, 2019)", |
| "ref_id": "BIBREF2" |
| }, |
| { |
| "start": 1988, |
| "end": 2009, |
| "text": "(Prasad et al., 2006)", |
| "ref_id": "BIBREF17" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Argument Extraction", |
| "sec_num": "6.2.2." |
| }, |
| { |
| "text": "We demonstrate how a large discourse-annotated corpus can be created by machine-translating the original English Penn Discourse TreeBank and exploiting word alignments to project the annotations over the English text onto the translated -in our case, German -text. 4 We discuss the procedure used to obtain the corpus and evaluate it by manually establishing the quality of the annotations on the German text for a small subset of the corpus. Additionally, in this process, we identify 21 candidates that we consider potentially valuable additions to DiMLex (a German connective lexicon). For an extrinsic evaluation, we use the obtained corpus as training data for selected sub-tasks (connective classification and argument extraction) of the larger task of discourse parsing and compare the obtained results to the same architectures trained on the original English, obtaining similar results for the two sub-tasks under investigation. 4 The release of the data via LDC is currently in preparation.", |
| "cite_spans": [ |
| { |
| "start": 265, |
| "end": 266, |
| "text": "4", |
| "ref_id": null |
| }, |
| { |
| "start": 938, |
| "end": 939, |
| "text": "4", |
| "ref_id": null |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Conclusion & Future Work", |
| "sec_num": "7." |
| }, |
| { |
| "text": "Another important piece of future work is the further extrinsic evaluation of the corpus using a German discourse parser currently under development. Once this component is available for the parser, we plan to use the GermanPDTB for the sub-task of sense classification (the next step after connective classification and argument extraction in a typical pipeline setup). In addition, we plan to establish whether or not individual components trained on the GermanPDTB improve performance when evaluating on a gold corpus, the Potsdam Commentary Corpus.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Conclusion & Future Work", |
| "sec_num": "7." |
| }, |
| { |
| "text": "https://translate.google.com/ 2 https://www.deepl.com/en/translator 3 https://www.bing.com/translator", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| } |
| ], |
| "back_matter": [ |
| { |
| "text": "Funded by the Deutsche Forschungsgemeinschaft (DFG, German Research Foundation) -323949969. We would like to thank the anonymous reviewers for their helpful comments on an earlier version of this manuscript.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Acknowledgments", |
| "sec_num": null |
| } |
| ], |
| "bib_entries": { |
| "BIBREF0": { |
| "ref_id": "b0", |
| "title": "Logics of Conversation", |
| "authors": [ |
| { |
| "first": "N", |
| "middle": [], |
| "last": "Asher", |
| "suffix": "" |
| }, |
| { |
| "first": "A", |
| "middle": [], |
| "last": "Lascarides", |
| "suffix": "" |
| } |
| ], |
| "year": 2005, |
| "venue": "Studies in Natural Language Processing", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Asher, N. and Lascarides, A. (2005). Logics of Conver- sation. Studies in Natural Language Processing. Cam- bridge University Press.", |
| "links": null |
| }, |
| "BIBREF1": { |
| "ref_id": "b1", |
| "title": "Identifying Explicit Discourse Connectives in German", |
| "authors": [ |
| { |
| "first": "P", |
| "middle": [], |
| "last": "Bourgonje", |
| "suffix": "" |
| }, |
| { |
| "first": "M", |
| "middle": [], |
| "last": "Stede", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "Proceedings of the 19th Annual SIGdial Meeting on Discourse and Dialogue", |
| "volume": "", |
| "issue": "", |
| "pages": "327--331", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Bourgonje, P. and Stede, M. (2018). Identifying Explicit Discourse Connectives in German. In Proceedings of the 19th Annual SIGdial Meeting on Discourse and Di- alogue, pages 327-331, Melbourne, Australia. Associa- tion for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF2": { |
| "ref_id": "b2", |
| "title": "Explicit Discourse Argument Extraction for German", |
| "authors": [ |
| { |
| "first": "P", |
| "middle": [], |
| "last": "Bourgonje", |
| "suffix": "" |
| }, |
| { |
| "first": "M", |
| "middle": [], |
| "last": "Stede", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "Proceedings of the 21st International Conference on Text, Speech and Dialogue", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Bourgonje, P. and Stede, M. (2019). Explicit Discourse Argument Extraction for German. In Proceedings of the 21st International Conference on Text, Speech and Dia- logue, Ljubljana, Slovenia.", |
| "links": null |
| }, |
| "BIBREF3": { |
| "ref_id": "b3", |
| "title": "The Potsdam Commentary Corpus 2.2: Extending Annotations for Shallow Discourse Parsing", |
| "authors": [ |
| { |
| "first": "P", |
| "middle": [], |
| "last": "Bourgonje", |
| "suffix": "" |
| }, |
| { |
| "first": "M", |
| "middle": [], |
| "last": "Stede", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "Proceedings of the 12th International Conference on Language Resources and Evaluation (LREC'20)", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Bourgonje, P. and Stede, M. (2020). The Potsdam Com- mentary Corpus 2.2: Extending Annotations for Shallow Discourse Parsing. In Proceedings of the 12th Interna- tional Conference on Language Resources and Evalua- tion (LREC'20), Paris, France, May. European Language Resources Association (ELRA).", |
| "links": null |
| }, |
| "BIBREF4": { |
| "ref_id": "b4", |
| "title": "Toward a bilingual lexical database on connectives: Exploiting a German/Italian parallel corpus", |
| "authors": [ |
| { |
| "first": "P", |
| "middle": [], |
| "last": "Bourgonje", |
| "suffix": "" |
| }, |
| { |
| "first": "Y", |
| "middle": [], |
| "last": "Grishina", |
| "suffix": "" |
| }, |
| { |
| "first": "M", |
| "middle": [], |
| "last": "Stede", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "Proceedings of the Fourth Italian Conference on Computational Linguistics", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Bourgonje, P., Grishina, Y., and Stede, M. (2017). Toward a bilingual lexical database on connectives: Exploiting a German/Italian parallel corpus. In Proceedings of the Fourth Italian Conference on Computational Linguistics, Rome, Italy, December.", |
| "links": null |
| }, |
| "BIBREF5": { |
| "ref_id": "b5", |
| "title": "Assessing the Accuracy of Discourse Connective Translations: Validation of an Automatic Metric", |
| "authors": [ |
| { |
| "first": "N", |
| "middle": [], |
| "last": "Hajlaoui", |
| "suffix": "" |
| }, |
| { |
| "first": "A", |
| "middle": [], |
| "last": "Popescu-Belis", |
| "suffix": "" |
| } |
| ], |
| "year": 2013, |
| "venue": "14th International Conference on Intelligent Text Processing and Computational Linguistics", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Hajlaoui, N. and Popescu-Belis, A. (2013). Assessing the Accuracy of Discourse Connective Translations: Vali- dation of an Automatic Metric. In 14th International Conference on Intelligent Text Processing and Compu- tational Linguistics. Springer.", |
| "links": null |
| }, |
| "BIBREF6": { |
| "ref_id": "b6", |
| "title": "Moses: Open Source Toolkit for Statistical Machine Translation", |
| "authors": [ |
| { |
| "first": "P", |
| "middle": [], |
| "last": "Koehn", |
| "suffix": "" |
| }, |
| { |
| "first": "H", |
| "middle": [], |
| "last": "Hoang", |
| "suffix": "" |
| }, |
| { |
| "first": "A", |
| "middle": [], |
| "last": "Birch", |
| "suffix": "" |
| }, |
| { |
| "first": "C", |
| "middle": [], |
| "last": "Callison-Burch", |
| "suffix": "" |
| }, |
| { |
| "first": "M", |
| "middle": [], |
| "last": "Federico", |
| "suffix": "" |
| }, |
| { |
| "first": "N", |
| "middle": [], |
| "last": "Bertoldi", |
| "suffix": "" |
| }, |
| { |
| "first": "B", |
| "middle": [], |
| "last": "Cowan", |
| "suffix": "" |
| }, |
| { |
| "first": "W", |
| "middle": [], |
| "last": "Shen", |
| "suffix": "" |
| }, |
| { |
| "first": "C", |
| "middle": [], |
| "last": "Moran", |
| "suffix": "" |
| }, |
| { |
| "first": "R", |
| "middle": [], |
| "last": "Zens", |
| "suffix": "" |
| }, |
| { |
| "first": "C", |
| "middle": [], |
| "last": "Dyer", |
| "suffix": "" |
| }, |
| { |
| "first": "O", |
| "middle": [], |
| "last": "Bojar", |
| "suffix": "" |
| }, |
| { |
| "first": "A", |
| "middle": [], |
| "last": "Constantin", |
| "suffix": "" |
| }, |
| { |
| "first": "E", |
| "middle": [], |
| "last": "Herbst", |
| "suffix": "" |
| } |
| ], |
| "year": 2007, |
| "venue": "Proceedings of the 45th Annual Meeting of the Association for Computational Linguistics Companion", |
| "volume": "", |
| "issue": "", |
| "pages": "177--180", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Koehn, P., Hoang, H., Birch, A., Callison-Burch, C., Fed- erico, M., Bertoldi, N., Cowan, B., Shen, W., Moran, C., Zens, R., Dyer, C., Bojar, O., Constantin, A., and Herbst, E. (2007). Moses: Open Source Toolkit for Statistical Machine Translation. In Proceedings of the 45th Annual Meeting of the Association for Computational Linguis- tics Companion, pages 177-180, Prague, Czech Repub- lic, June. Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF7": { |
| "ref_id": "b7", |
| "title": "Inducing Discourse Connectives from Parallel Texts", |
| "authors": [ |
| { |
| "first": "M", |
| "middle": [], |
| "last": "Laali", |
| "suffix": "" |
| }, |
| { |
| "first": "L", |
| "middle": [], |
| "last": "Kosseim", |
| "suffix": "" |
| } |
| ], |
| "year": 2014, |
| "venue": "Proceedings of COLING 2014, the 25th International Conference on Computational Linguistics: Technical Papers", |
| "volume": "", |
| "issue": "", |
| "pages": "610--619", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Laali, M. and Kosseim, L. (2014). Inducing Discourse Connectives from Parallel Texts. In Proceedings of COLING 2014, the 25th International Conference on Computational Linguistics: Technical Papers, pages 610-619, Dublin, Ireland, August. Dublin City Univer- sity and Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF8": { |
| "ref_id": "b8", |
| "title": "Inducing Discourse Resources Using Annotation Projection", |
| "authors": [ |
| { |
| "first": "M", |
| "middle": [], |
| "last": "Laali", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Laali, M. (2017). Inducing Discourse Resources Using An- notation Projection. Ph.D. thesis, Concordia University.", |
| "links": null |
| }, |
| "BIBREF9": { |
| "ref_id": "b9", |
| "title": "A Constituent Syntactic Parse Tree Based Discourse Parser", |
| "authors": [ |
| { |
| "first": "Z", |
| "middle": [], |
| "last": "Li", |
| "suffix": "" |
| }, |
| { |
| "first": "H", |
| "middle": [], |
| "last": "Zhao", |
| "suffix": "" |
| }, |
| { |
| "first": "C", |
| "middle": [], |
| "last": "Pang", |
| "suffix": "" |
| }, |
| { |
| "first": "L", |
| "middle": [], |
| "last": "Wang", |
| "suffix": "" |
| }, |
| { |
| "first": "Wang", |
| "middle": [], |
| "last": "", |
| "suffix": "" |
| }, |
| { |
| "first": "H", |
| "middle": [], |
| "last": "", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "Proceedings of the CoNLL-16 shared task", |
| "volume": "", |
| "issue": "", |
| "pages": "60--64", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Li, Z., Zhao, H., Pang, C., Wang, L., and Wang, H. (2016). A Constituent Syntactic Parse Tree Based Dis- course Parser. In Proceedings of the CoNLL-16 shared task, pages 60-64, Berlin, Germany, August. Associa- tion for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF10": { |
| "ref_id": "b10", |
| "title": "Rhetorical Structure Theory: Towards a Functional Theory of Text Organization", |
| "authors": [ |
| { |
| "first": "W", |
| "middle": [], |
| "last": "Mann", |
| "suffix": "" |
| }, |
| { |
| "first": "S", |
| "middle": [], |
| "last": "Thompson", |
| "suffix": "" |
| } |
| ], |
| "year": 1988, |
| "venue": "Text", |
| "volume": "8", |
| "issue": "", |
| "pages": "243--281", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Mann, W. and Thompson, S. (1988). Rhetorical Structure Theory: Towards a Functional Theory of Text Organiza- tion. Text, 8:243-281.", |
| "links": null |
| }, |
| "BIBREF11": { |
| "ref_id": "b11", |
| "title": "Implicitation of Discourse Connectives in (Machine) Translation", |
| "authors": [ |
| { |
| "first": "T", |
| "middle": [], |
| "last": "Meyer", |
| "suffix": "" |
| }, |
| { |
| "first": "B", |
| "middle": [], |
| "last": "Webber", |
| "suffix": "" |
| } |
| ], |
| "year": 2013, |
| "venue": "Proceedings of the Workshop on Discourse in Machine Translation", |
| "volume": "", |
| "issue": "", |
| "pages": "19--26", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Meyer, T. and Webber, B. (2013). Implicitation of Dis- course Connectives in (Machine) Translation. In Pro- ceedings of the Workshop on Discourse in Machine Translation, pages 19-26.", |
| "links": null |
| }, |
| "BIBREF12": { |
| "ref_id": "b12", |
| "title": "Efficient higher-order CRFs for morphological tagging", |
| "authors": [ |
| { |
| "first": "T", |
| "middle": [], |
| "last": "M\u00fcller", |
| "suffix": "" |
| }, |
| { |
| "first": "H", |
| "middle": [], |
| "last": "Schmid", |
| "suffix": "" |
| }, |
| { |
| "first": "H", |
| "middle": [], |
| "last": "Sch\u00fctze", |
| "suffix": "" |
| } |
| ], |
| "year": 2013, |
| "venue": "Proceedings of the 2013 Conference on Empirical Methods in Natural Language Processing", |
| "volume": "", |
| "issue": "", |
| "pages": "322--332", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "M\u00fcller, T., Schmid, H., and Sch\u00fctze, H. (2013). Efficient higher-order CRFs for morphological tagging. In Pro- ceedings of the 2013 Conference on Empirical Methods in Natural Language Processing, pages 322-332.", |
| "links": null |
| }, |
| "BIBREF13": { |
| "ref_id": "b13", |
| "title": "A Systematic Comparison of Various Statistical Alignment Models", |
| "authors": [ |
| { |
| "first": "F", |
| "middle": [ |
| "J" |
| ], |
| "last": "Och", |
| "suffix": "" |
| }, |
| { |
| "first": "H", |
| "middle": [], |
| "last": "Ney", |
| "suffix": "" |
| } |
| ], |
| "year": 2003, |
| "venue": "Computational Linguistics", |
| "volume": "29", |
| "issue": "1", |
| "pages": "19--51", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Och, F. J. and Ney, H. (2003). A Systematic Comparison of Various Statistical Alignment Models. Computational Linguistics, 29(1):19-51.", |
| "links": null |
| }, |
| "BIBREF14": { |
| "ref_id": "b14", |
| "title": "OPT: Oslo-Potsdam-Teesside-Pipelining Rules, Rankers, and Classifier Ensembles for Shallow Discourse Parsing", |
| "authors": [ |
| { |
| "first": "S", |
| "middle": [], |
| "last": "Oepen", |
| "suffix": "" |
| }, |
| { |
| "first": "J", |
| "middle": [], |
| "last": "Read", |
| "suffix": "" |
| }, |
| { |
| "first": "T", |
| "middle": [], |
| "last": "Scheffler", |
| "suffix": "" |
| }, |
| { |
| "first": "U", |
| "middle": [], |
| "last": "Sidarenka", |
| "suffix": "" |
| }, |
| { |
| "first": "M", |
| "middle": [], |
| "last": "Stede", |
| "suffix": "" |
| }, |
| { |
| "first": "E", |
| "middle": [], |
| "last": "Velldal", |
| "suffix": "" |
| }, |
| { |
| "first": "L", |
| "middle": [], |
| "last": "\u00d8vrelid", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "Proceedings of the CONLL 2016 Shared Task", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Oepen, S., Read, J., Scheffler, T., Sidarenka, U., Stede, M., Velldal, E., and \u00d8vrelid, L. (2016). OPT: Oslo-Potsdam-Teesside-Pipelining Rules, Rankers, and Classifier Ensembles for Shallow Discourse Parsing. In Proceedings of the CONLL 2016 Shared Task, Berlin.", |
| "links": null |
| }, |
| "BIBREF15": { |
| "ref_id": "b15", |
| "title": "Bleu: a Method for Automatic Evaluation of Machine Translation", |
| "authors": [ |
| { |
| "first": "K", |
| "middle": [], |
| "last": "Papineni", |
| "suffix": "" |
| }, |
| { |
| "first": "S", |
| "middle": [], |
| "last": "Roukos", |
| "suffix": "" |
| }, |
| { |
| "first": "T", |
| "middle": [], |
| "last": "Ward", |
| "suffix": "" |
| }, |
| { |
| "first": "W.-J", |
| "middle": [], |
| "last": "Zhu", |
| "suffix": "" |
| } |
| ], |
| "year": 2002, |
| "venue": "Proceedings of the 40th Annual Meeting of the Association for Computational Linguistics", |
| "volume": "", |
| "issue": "", |
| "pages": "311--318", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Papineni, K., Roukos, S., Ward, T., and Zhu, W.-J. (2002). Bleu: a Method for Automatic Evaluation of Machine Translation. In Proceedings of the 40th Annual Meeting of the Association for Computational Linguistics, pages 311-318, Philadelphia, Pennsylvania, USA, July. Asso- ciation for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF16": { |
| "ref_id": "b16", |
| "title": "Learning from human judgments of machine translation output", |
| "authors": [ |
| { |
| "first": "M", |
| "middle": [], |
| "last": "Popovic", |
| "suffix": "" |
| }, |
| { |
| "first": "E", |
| "middle": [], |
| "last": "Avramidis", |
| "suffix": "" |
| }, |
| { |
| "first": "A", |
| "middle": [], |
| "last": "Burchardt", |
| "suffix": "" |
| }, |
| { |
| "first": "S", |
| "middle": [], |
| "last": "Hunsicker", |
| "suffix": "" |
| }, |
| { |
| "first": "S", |
| "middle": [], |
| "last": "Schmeier", |
| "suffix": "" |
| }, |
| { |
| "first": "C", |
| "middle": [], |
| "last": "Tscherwinka", |
| "suffix": "" |
| }, |
| { |
| "first": "D", |
| "middle": [], |
| "last": "Vilar", |
| "suffix": "" |
| }, |
| { |
| "first": "H", |
| "middle": [], |
| "last": "Uszkoreit", |
| "suffix": "" |
| } |
| ], |
| "year": 2013, |
| "venue": "Proceedings of the MT Summit XIV. Machine Translation Summit (MT-Summit-2013)", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Popovic, M., Avramidis, E., Burchardt, A., Hunsicker, S., Schmeier, S., Tscherwinka, C., Vilar, D., and Uszko- reit, H. (2013). Learning from human judgments of machine translation output. In Proceedings of the MT Summit XIV. Machine Translation Summit (MT-Summit- 2013), Nice, France, September. The European Associa- tion for Machine Translation.", |
| "links": null |
| }, |
| "BIBREF17": { |
| "ref_id": "b17", |
| "title": "Annotating Attribution in the Penn Discourse TreeBank", |
| "authors": [ |
| { |
| "first": "R", |
| "middle": [], |
| "last": "Prasad", |
| "suffix": "" |
| }, |
| { |
| "first": "N", |
| "middle": [], |
| "last": "Dinesh", |
| "suffix": "" |
| }, |
| { |
| "first": "A", |
| "middle": [], |
| "last": "Lee", |
| "suffix": "" |
| }, |
| { |
| "first": "A", |
| "middle": [], |
| "last": "Joshi", |
| "suffix": "" |
| }, |
| { |
| "first": "B", |
| "middle": [], |
| "last": "Webber", |
| "suffix": "" |
| } |
| ], |
| "year": 2006, |
| "venue": "Proceedings of the Workshop on Sentiment and Subjectivity in Text, SST '06", |
| "volume": "", |
| "issue": "", |
| "pages": "31--38", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Prasad, R., Dinesh, N., Lee, A., Joshi, A., and Webber, B. (2006). Annotating Attribution in the Penn Discourse TreeBank. In Proceedings of the Workshop on Sentiment and Subjectivity in Text, SST '06, pages 31-38, Strouds- burg, PA, USA. Association for Computational Linguis- tics.", |
| "links": null |
| }, |
| "BIBREF18": { |
| "ref_id": "b18", |
| "title": "The Penn Discourse TreeBank 2.0", |
| "authors": [ |
| { |
| "first": "R", |
| "middle": [], |
| "last": "Prasad", |
| "suffix": "" |
| }, |
| { |
| "first": "N", |
| "middle": [], |
| "last": "Dinesh", |
| "suffix": "" |
| }, |
| { |
| "first": "A", |
| "middle": [], |
| "last": "Lee", |
| "suffix": "" |
| }, |
| { |
| "first": "E", |
| "middle": [], |
| "last": "Miltsakaki", |
| "suffix": "" |
| }, |
| { |
| "first": "L", |
| "middle": [], |
| "last": "Robaldo", |
| "suffix": "" |
| }, |
| { |
| "first": "A", |
| "middle": [], |
| "last": "Joshi", |
| "suffix": "" |
| }, |
| { |
| "first": "B", |
| "middle": [], |
| "last": "Webber", |
| "suffix": "" |
| } |
| ], |
| "year": 2008, |
| "venue": "Proceedings of the Sixth International Conference on Language Resources and Evaluation (LREC'08)", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Prasad, R., Dinesh, N., Lee, A., Miltsakaki, E., Robaldo, L., Joshi, A., and Webber, B. (2008). The Penn Dis- course TreeBank 2.0. In Proceedings of the Sixth Inter- national Conference on Language Resources and Evalu- ation (LREC'08), Marrakech, Morocco, May. European Language Resources Association (ELRA).", |
| "links": null |
| }, |
| "BIBREF19": { |
| "ref_id": "b19", |
| "title": "Edinburgh Neural Machine Translation Systems for WMT 16", |
| "authors": [ |
| { |
| "first": "R", |
| "middle": [], |
| "last": "Sennrich", |
| "suffix": "" |
| }, |
| { |
| "first": "B", |
| "middle": [], |
| "last": "Haddow", |
| "suffix": "" |
| }, |
| { |
| "first": "A", |
| "middle": [], |
| "last": "Birch", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "Proceedings of the First Conference on Machine Translation", |
| "volume": "2", |
| "issue": "", |
| "pages": "371--376", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Sennrich, R., Haddow, B., and Birch, A. (2016). Edin- burgh Neural Machine Translation Systems for WMT 16. In Proceedings of the First Conference on Machine Translation: Volume 2, Shared Task Papers, pages 371- 376, Berlin, Germany, August. Association for Compu- tational Linguistics.", |
| "links": null |
| }, |
| "BIBREF20": { |
| "ref_id": "b20", |
| "title": "DiMLex: A Lexical Approach to Discourse Markers", |
| "authors": [ |
| { |
| "first": "M", |
| "middle": [], |
| "last": "Stede", |
| "suffix": "" |
| } |
| ], |
| "year": 2002, |
| "venue": "Exploring the Lexicon -Theory and Computation. Edizioni dell'Orso", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Stede, M. (2002). DiMLex: A Lexical Approach to Dis- course Markers. In Lenci A. et al., editors, Exploring the Lexicon -Theory and Computation. Edizioni dell'Orso, Alessandria.", |
| "links": null |
| }, |
| "BIBREF21": { |
| "ref_id": "b21", |
| "title": "Parallel data, tools and interfaces in OPUS", |
| "authors": [ |
| { |
| "first": "J", |
| "middle": [], |
| "last": "Tiedemann", |
| "suffix": "" |
| } |
| ], |
| "year": 2012, |
| "venue": "Proceedings of the Eighth International Conference on Language Resources and Evaluation (LREC'12)", |
| "volume": "", |
| "issue": "", |
| "pages": "2214--2218", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Tiedemann, J. (2012). Parallel data, tools and interfaces in OPUS. In Proceedings of the Eighth International Conference on Language Resources and Evaluation (LREC'12), pages 2214-2218, Istanbul, Turkey, May. European Language Resources Association (ELRA).", |
| "links": null |
| }, |
| "BIBREF22": { |
| "ref_id": "b22", |
| "title": "Linguistic Tests for Discourse Relations in the T\u00fcBa-D/Z corpus of written German", |
| "authors": [ |
| { |
| "first": "Y", |
| "middle": [], |
| "last": "Versley", |
| "suffix": "" |
| }, |
| { |
| "first": "A", |
| "middle": [], |
| "last": "Gastel", |
| "suffix": "" |
| } |
| ], |
| "year": 2012, |
| "venue": "Dialogue and Discourse", |
| "volume": "4", |
| "issue": "2", |
| "pages": "142--173", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Versley, Y. and Gastel, A. (2012). Linguistic Tests for Dis- course Relations in the T\u00fcBa-D/Z corpus of written Ger- man. Dialogue and Discourse, 4(2):142-173.", |
| "links": null |
| }, |
| "BIBREF23": { |
| "ref_id": "b23", |
| "title": "Discovery of ambiguous and unambiguous discourse connectives via annotation projection", |
| "authors": [ |
| { |
| "first": "Y", |
| "middle": [], |
| "last": "Versley", |
| "suffix": "" |
| } |
| ], |
| "year": 2010, |
| "venue": "Proceedings of Workshop on Annotation and Exploitation of Parallel Corpora (AEPC)", |
| "volume": "", |
| "issue": "", |
| "pages": "83--92", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Versley, Y. (2010). Discovery of ambiguous and unam- biguous discourse connectives via annotation projection. In L. Ahrenberg, et al., editors, Proceedings of Work- shop on Annotation and Exploitation of Parallel Corpora (AEPC), pages 83-92. Northern European Association for Language Technology (NEALT).", |
| "links": null |
| }, |
| "BIBREF25": { |
| "ref_id": "b25", |
| "title": "Shared Task on Multilingual Shallow Discourse Parsing", |
| "authors": [], |
| "year": null, |
| "venue": "Association for Computational Linguistics", |
| "volume": "", |
| "issue": "", |
| "pages": "1--19", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Shared Task on Multilingual Shallow Discourse Parsing. In Proceedings of the CoNLL-16 shared task, pages 1- 19, Berlin, Germany, August. Association for Computa- tional Linguistics.", |
| "links": null |
| }, |
| "BIBREF26": { |
| "ref_id": "b26", |
| "title": "The DISRPT 2019 Shared Task on Elementary Discourse Unit Segmentation and Connective Detection", |
| "authors": [ |
| { |
| "first": "A", |
| "middle": [], |
| "last": "Zeldes", |
| "suffix": "" |
| }, |
| { |
| "first": "D", |
| "middle": [], |
| "last": "Das", |
| "suffix": "" |
| }, |
| { |
| "first": "E", |
| "middle": [ |
| "G" |
| ], |
| "last": "Maziero", |
| "suffix": "" |
| }, |
| { |
| "first": "J", |
| "middle": [], |
| "last": "Antonio", |
| "suffix": "" |
| }, |
| { |
| "first": "M", |
| "middle": [], |
| "last": "Iruskieta", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "Proceedings of the Workshop on Discourse Relation Parsing and Treebanking", |
| "volume": "", |
| "issue": "", |
| "pages": "97--104", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Zeldes, A., Das, D., Maziero, E. G., Antonio, J., and Iruski- eta, M. (2019). The DISRPT 2019 Shared Task on El- ementary Discourse Unit Segmentation and Connective Detection. In Proceedings of the Workshop on Discourse Relation Parsing and Treebanking 2019, pages 97-104, Minneapolis, MN, June. Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF27": { |
| "ref_id": "b27", |
| "title": "Cross-Lingual Identification of Ambiguous Discourse Connectives for Resource-Poor Language", |
| "authors": [ |
| { |
| "first": "L", |
| "middle": [], |
| "last": "Zhou", |
| "suffix": "" |
| }, |
| { |
| "first": "W", |
| "middle": [], |
| "last": "Gao", |
| "suffix": "" |
| }, |
| { |
| "first": "B", |
| "middle": [], |
| "last": "Li", |
| "suffix": "" |
| }, |
| { |
| "first": "Z", |
| "middle": [], |
| "last": "Wei", |
| "suffix": "" |
| }, |
| { |
| "first": "K.-F", |
| "middle": [], |
| "last": "Wong", |
| "suffix": "" |
| } |
| ], |
| "year": 2012, |
| "venue": "The COLING 2012 Organizing Committee", |
| "volume": "", |
| "issue": "", |
| "pages": "1409--1418", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Zhou, L., Gao, W., Li, B., Wei, Z., and Wong, K.-F. (2012). Cross-Lingual Identification of Ambiguous Discourse Connectives for Resource-Poor Language. In Proceed- ings of COLING 2012: Posters, pages 1409-1418, Mum- bai, India, December. The COLING 2012 Organizing Committee.", |
| "links": null |
| } |
| }, |
| "ref_entries": { |
| "TABREF0": { |
| "html": null, |
| "type_str": "table", |
| "num": null, |
| "content": "<table><tr><td colspan=\"2\">Heuristic/Category</td><td colspan=\"3\">% of explicits not found Total number explicits Null alignments</td></tr><tr><td>Intersection</td><td/><td>8.7</td><td>16401</td><td>1817</td></tr><tr><td>Grow</td><td/><td>10.2</td><td>16901</td><td>1043</td></tr><tr><td>Grow-diag</td><td/><td>11.5</td><td>17428</td><td>579</td></tr><tr><td colspan=\"2\">Grow-diag-final-and</td><td>12.0</td><td>17634</td><td>372</td></tr><tr><td>Grow-diag-final</td><td/><td>13.5</td><td>18059</td><td>24</td></tr><tr><td>Union</td><td/><td>14.8</td><td>18354</td><td>23</td></tr><tr><td>Word</td><td colspan=\"2\">Count</td><td/></tr><tr><td>if</td><td>17</td><td/><td/></tr><tr><td>and</td><td>14</td><td/><td/></tr><tr><td>however</td><td>14</td><td/><td/></tr><tr><td>while</td><td>13</td><td/><td/></tr><tr><td>meanwhile</td><td>12</td><td/><td/></tr><tr><td>also</td><td>8</td><td/><td/></tr></table>", |
| "text": "Performance for the explicit connective projection for the different alignment heuristics sorted by restrictiveness." |
| } |
| } |
| } |
| } |