| { |
| "paper_id": "D14-1013", |
| "header": { |
| "generated_with": "S2ORC 1.0.0", |
| "date_generated": "2023-01-19T15:55:09.808881Z" |
| }, |
| "title": "Combining Punctuation and Disfluency Prediction: An Empirical Study", |
| "authors": [ |
| { |
| "first": "Xuancong", |
| "middle": [], |
| "last": "Wang", |
| "suffix": "", |
| "affiliation": {}, |
| "email": "xuancong84@gmail.com" |
| }, |
| { |
| "first": "Khe", |
| "middle": [], |
| "last": "Chai", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "National University of Singapore", |
| "location": {} |
| }, |
| "email": "" |
| }, |
| { |
| "first": "Hwee", |
| "middle": [ |
| "Tou" |
| ], |
| "last": "Ng", |
| "suffix": "", |
| "affiliation": {}, |
| "email": "" |
| } |
| ], |
| "year": "", |
| "venue": null, |
| "identifiers": {}, |
| "abstract": "Punctuation prediction and disfluency prediction can improve downstream natural language processing tasks such as machine translation and information extraction. Combining the two tasks can potentially improve the efficiency of the overall pipeline system and reduce error propagation. In this work 1 , we compare various methods for combining punctuation prediction (PU) and disfluency prediction (DF) on the Switchboard corpus. We compare an isolated prediction approach with a cascade approach, a rescoring approach, and three joint model approaches. For the cascade approach, we show that the soft cascade method is better than the hard cascade method. We also use the cascade models to generate an n-best list, use the bi-directional cascade models to perform rescoring, and compare that with the results of the cascade models. For the joint model approach, we compare mixedlabel Linear-chain Conditional Random Field (LCRF), cross-product LCRF and 2layer Factorial Conditional Random Field (FCRF) with soft-cascade LCRF. Our results show that the various methods linking the two tasks are not significantly different from one another, although they perform better than the isolated prediction method by 0.5-1.5% in the F1 score. Moreover, the clique order of features also shows a marked difference.", |
| "pdf_parse": { |
| "paper_id": "D14-1013", |
| "_pdf_hash": "", |
| "abstract": [ |
| { |
| "text": "Punctuation prediction and disfluency prediction can improve downstream natural language processing tasks such as machine translation and information extraction. Combining the two tasks can potentially improve the efficiency of the overall pipeline system and reduce error propagation. In this work 1 , we compare various methods for combining punctuation prediction (PU) and disfluency prediction (DF) on the Switchboard corpus. We compare an isolated prediction approach with a cascade approach, a rescoring approach, and three joint model approaches. For the cascade approach, we show that the soft cascade method is better than the hard cascade method. We also use the cascade models to generate an n-best list, use the bi-directional cascade models to perform rescoring, and compare that with the results of the cascade models. For the joint model approach, we compare mixedlabel Linear-chain Conditional Random Field (LCRF), cross-product LCRF and 2layer Factorial Conditional Random Field (FCRF) with soft-cascade LCRF. Our results show that the various methods linking the two tasks are not significantly different from one another, although they perform better than the isolated prediction method by 0.5-1.5% in the F1 score. Moreover, the clique order of features also shows a marked difference.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Abstract", |
| "sec_num": null |
| } |
| ], |
| "body_text": [ |
| { |
| "text": "The raw output from automatic speech recognition (ASR) systems does not have sentence bound-aries or punctuation symbols. Spontaneous speech also contains a significant proportion of disfluency. Researchers have shown that splitting input sequences into sentences and adding in punctuation symbols improve machine translation (Favre et al., 2008; Lu and Ng, 2010) . Moreover, disfluencies in speech also introduce noise in downstream tasks like machine translation and information extraction (Wang et al., 2010) . Thus, punctuation prediction (PU) and disfluency prediction (DF) are two important post-processing tasks for automatic speech recognition because they improve not only the readability of ASR output, but also the performance of downstream Natural Language Processing (NLP) tasks.", |
| "cite_spans": [ |
| { |
| "start": 326, |
| "end": 346, |
| "text": "(Favre et al., 2008;", |
| "ref_id": "BIBREF2" |
| }, |
| { |
| "start": 347, |
| "end": 363, |
| "text": "Lu and Ng, 2010)", |
| "ref_id": "BIBREF9" |
| }, |
| { |
| "start": 492, |
| "end": 511, |
| "text": "(Wang et al., 2010)", |
| "ref_id": "BIBREF22" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "The task of punctuation prediction is to insert punctuation symbols into conversational speech texts. Punctuation prediction on long, unsegmented texts also achieves the purpose of sentence boundary prediction, because sentence boundaries are identified by sentence-end punctuation symbols: periods, question marks, and exclamation marks. Consider the following example, How do you feel about the Viet Nam War ? Yeah , I saw that as well .", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "The question mark splits the sequence into two sentences. This paper deals with this task which is more challenging than that on text that has already been split into sentences.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "The task of disfluency prediction is to identify word tokens that are spoken incorrectly due to speech disfluency. There are two main types of disfluencies: filler words and edit words. Filler words mainly include filled pauses (e.g., 'uh', 'um') and discourse markers (e.g., \"I mean\", \"you know\"). As they are insertions in spontaneous speech to indicate pauses or mark boundaries in discourse, they do not convey useful content information. Edit words are words that are spoken wrongly and then corrected by the speaker. For example, consider the following utterance: The phrase \"to Boston\" forms the edit region to be replaced by \"to Denver\". The words \"uh I mean\" are filler words that serve to cue the listener about the error and subsequent corrections.", |
| "cite_spans": [ |
| { |
| "start": 235, |
| "end": 246, |
| "text": "'uh', 'um')", |
| "ref_id": null |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "The motivation of combining the two tasks can be illustrated by the following two utterances:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "I am uh I am not going with you . I am sorry . I am not going with you .", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "Notice that the bi-gram \"I am\" is repeated in both sentences. For the first utterance, if punctuation prediction is performed first, it might break the utterance both before and after \"uh\" so that the second-stage disfluency prediction will treat the whole utterance as three sentences, and thus may not be able to detect any disfluency because each one of the three sentences is legitimate on its own. On the other hand, for the second utterance, if disfluency prediction is performed first, it might mark \"I am sorry\" as disfluent in the first place and remove it before passing into the second-stage punctuation prediction. Therefore, no matter which task is performed first, certain utterances can always cause confusion.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "There are many ways to combine the two tasks. For example, we can perform one task first followed by another, which is called the cascade approach. We can also mix the labels, or take the cross-product of the labels, or use joint prediction models. In this paper, we study the mutual influence between the two tasks and compare a variety of common state-of-the-art joint prediction techniques on this joint task.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "In Section 2, we briefly introduce previous work on the two tasks. In Section 3, we describe our baseline system which performs punctuation and disfluency prediction separately (i.e., in isolation). In Section 4, we compare the soft cascade approach with the hard cascade approach. We also examine the effect of task order, i.e., performing which task first benefits more. In Section 5, we compare the cascade approach with bi-directional n-best rescoring. In Section 6, we compare the 2layer Factorial CRF (Sutton et al., 2007) with the cross-product LCRF (Ng and Low, 2004) , mixedlabel LCRF , the cascade approach, and the baseline isolated prediction. Section 7 gives a summary of our overall findings. Section 8 gives the conclusion.", |
| "cite_spans": [ |
| { |
| "start": 507, |
| "end": 528, |
| "text": "(Sutton et al., 2007)", |
| "ref_id": "BIBREF20" |
| }, |
| { |
| "start": 557, |
| "end": 575, |
| "text": "(Ng and Low, 2004)", |
| "ref_id": "BIBREF11" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "There were many works on punctuation prediction or disfluency prediction as an isolated task. For punctuation prediction, Huang and Zweig (2002) used maximum entropy model; Christensen et al. (2001) used finite state and multi-layer perceptron method; Liu et al. (2005) used conditional random fields; Lu and Ng (2010) proposed using dynamic conditional random fields for joint sentence boundary type and punctuation prediction; Wang et al. (2012) has added prosodic features for the dynamic conditional random field approach and Zhang et al. (2013) used transition-based parsing.", |
| "cite_spans": [ |
| { |
| "start": 122, |
| "end": 144, |
| "text": "Huang and Zweig (2002)", |
| "ref_id": "BIBREF4" |
| }, |
| { |
| "start": 173, |
| "end": 198, |
| "text": "Christensen et al. (2001)", |
| "ref_id": "BIBREF1" |
| }, |
| { |
| "start": 252, |
| "end": 269, |
| "text": "Liu et al. (2005)", |
| "ref_id": "BIBREF7" |
| }, |
| { |
| "start": 302, |
| "end": 318, |
| "text": "Lu and Ng (2010)", |
| "ref_id": "BIBREF9" |
| }, |
| { |
| "start": 429, |
| "end": 447, |
| "text": "Wang et al. (2012)", |
| "ref_id": "BIBREF23" |
| }, |
| { |
| "start": 530, |
| "end": 549, |
| "text": "Zhang et al. (2013)", |
| "ref_id": "BIBREF25" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Previous Work", |
| "sec_num": "2" |
| }, |
| { |
| "text": "For disfluency prediction, Shriberg et al. (1997) uses purely prosodic features to perform the task. Johnson and Charniak (2004) proposed a TAGbased (Tree-Adjoining Grammar) noisy channel model. Maskey et al. (2006) proposed a phraselevel machine translation approach for this task. Georgila (2009) used integer linear programming (ILP) which can incorporate local and global constraints. Zwarts and Johnson (2011) has investigated the effect of using extra language models as features in the reranking stage. Qian and Liu (2013) proposed using weighted Max-margin Markov Networks (M3N) to balance precision and recall to further improve the F1-score. Wang et al. (2014) proposed a beam-search decoder which integrates M3N and achieved further improvements.", |
| "cite_spans": [ |
| { |
| "start": 27, |
| "end": 49, |
| "text": "Shriberg et al. (1997)", |
| "ref_id": "BIBREF16" |
| }, |
| { |
| "start": 101, |
| "end": 128, |
| "text": "Johnson and Charniak (2004)", |
| "ref_id": "BIBREF5" |
| }, |
| { |
| "start": 195, |
| "end": 215, |
| "text": "Maskey et al. (2006)", |
| "ref_id": "BIBREF10" |
| }, |
| { |
| "start": 283, |
| "end": 298, |
| "text": "Georgila (2009)", |
| "ref_id": "BIBREF3" |
| }, |
| { |
| "start": 389, |
| "end": 414, |
| "text": "Zwarts and Johnson (2011)", |
| "ref_id": "BIBREF26" |
| }, |
| { |
| "start": 510, |
| "end": 529, |
| "text": "Qian and Liu (2013)", |
| "ref_id": "BIBREF13" |
| }, |
| { |
| "start": 652, |
| "end": 670, |
| "text": "Wang et al. (2014)", |
| "ref_id": "BIBREF24" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Previous Work", |
| "sec_num": "2" |
| }, |
| { |
| "text": "There were also some works that addressed both tasks. Liu et al. (2006) and Baron et al. (1998) carried out sentence unit (SU) and disfluency prediction as separate tasks. The difference between SU prediction and punctuation prediction is only in the non-sentence-end punctuation symbols such as commas. mixed sentence boundary labels with disfluency labels so that they do not predict punctuation on disfluent tokens. Kim (2004) performed joint SU and Interruption Point (IP) prediction, deriving edit and filler word regions from predicted IPs using a rulebased system as a separate step.", |
| "cite_spans": [ |
| { |
| "start": 54, |
| "end": 71, |
| "text": "Liu et al. (2006)", |
| "ref_id": "BIBREF8" |
| }, |
| { |
| "start": 76, |
| "end": 95, |
| "text": "Baron et al. (1998)", |
| "ref_id": null |
| }, |
| { |
| "start": 419, |
| "end": 429, |
| "text": "Kim (2004)", |
| "ref_id": "BIBREF6" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Previous Work", |
| "sec_num": "2" |
| }, |
| { |
| "text": "In this paper, we treat punctuation prediction and disfluency prediction as a joint prediction task, and compare various state-of-the-art joint prediction methods on this task.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Previous Work", |
| "sec_num": "2" |
| }, |
| { |
| "text": "We use the Switchboard corpus (LDC99T42) in our experiment with the same train/develop/test split as (Qian and Liu, 2013) and (Johnson and Charniak, 2004) . The corpus statistics are shown in Table 1 . Since the proportion of exclamation marks and incomplete SU boundaries is too small, we convert all exclamation marks to periods and remove all incomplete SU boundaries (treat as no punctuation). In the Switchboard corpus, the utterances of each speaker have already been segmented into short sentences when used in (Qian and Liu, 2013; Johnson and Charniak, 2004) . In our work, we concatenate the utterances of each speaker to form one long sequence of words for use as the input to punctuation prediction and disfluency prediction. This form of input where, utterances are not pre-segmented into short sentences, better reflects the real-world scenarios and provides a more realistic test setting for punctuation and disfluency prediction. Punctuation prediction also gives rise to sentence segmentation in this setting. Table 1 : Corpus statistics for all the experiments. *: each conversation produces two long/sentencejoined sequences, one from each speaker.", |
| "cite_spans": [ |
| { |
| "start": 101, |
| "end": 121, |
| "text": "(Qian and Liu, 2013)", |
| "ref_id": "BIBREF13" |
| }, |
| { |
| "start": 126, |
| "end": 154, |
| "text": "(Johnson and Charniak, 2004)", |
| "ref_id": "BIBREF5" |
| }, |
| { |
| "start": 518, |
| "end": 538, |
| "text": "(Qian and Liu, 2013;", |
| "ref_id": "BIBREF13" |
| }, |
| { |
| "start": 539, |
| "end": 566, |
| "text": "Johnson and Charniak, 2004)", |
| "ref_id": "BIBREF5" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 192, |
| "end": 199, |
| "text": "Table 1", |
| "ref_id": null |
| }, |
| { |
| "start": 1026, |
| "end": 1033, |
| "text": "Table 1", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Experimental Setup", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "Our baseline system uses M3N (Taskar et al., 2004) , one M3N for punctuation prediction and the other for disfluency prediction. We use the same set of punctuation and disfluency labels (as shown in Table 2 ) throughout this paper. To compare the various isolated, cascade, and joint prediction models, we use the same feature templates for both tasks as listed in Table 3 . Since some of the feature templates require predicted filler labels and part-of-speech (POS) tags, we have trained a POS tagger and a filler predictor both using CRF (i.e., using the same approach as that in Qian and Liu (2013) ). The same predicted POS tags and fillers are used for feature extraction in all the experiments in this paper for a fair comparison. The degradation on disfluency prediction due to the concatenation of utterances of each speaker is shown in Table 4 . The pause duration features are extracted by running forced alignment on the corresponding Switchboard speech corpus (LDC97S62).", |
| "cite_spans": [ |
| { |
| "start": 29, |
| "end": 50, |
| "text": "(Taskar et al., 2004)", |
| "ref_id": "BIBREF21" |
| }, |
| { |
| "start": 583, |
| "end": 602, |
| "text": "Qian and Liu (2013)", |
| "ref_id": "BIBREF13" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 199, |
| "end": 206, |
| "text": "Table 2", |
| "ref_id": null |
| }, |
| { |
| "start": 365, |
| "end": 372, |
| "text": "Table 3", |
| "ref_id": null |
| }, |
| { |
| "start": 846, |
| "end": 853, |
| "text": "Table 4", |
| "ref_id": "TABREF2" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Experimental Setup", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "Disfluency prediction", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Task Label Meaning", |
| "sec_num": null |
| }, |
| { |
| "text": "E edit word F filler word O otherwise / fluent Punctuation prediction Comma comma Period full-stop QMark", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Task Label Meaning", |
| "sec_num": null |
| }, |
| { |
| "text": "question mark None no punctuation Table 2 : Labels for punctuation prediction and disfluency prediction.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 34, |
| "end": 41, |
| "text": "Table 2", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Task Label Meaning", |
| "sec_num": null |
| }, |
| { |
| "text": "We use the standard NLP features such as F (w \u22121 w 0 ='so that'), i.e., the word tokens at the previous and current node position are 'so' and 'that' respectively. Each feature is associated with a clique order. For example, since the clique order of this feature template is 2 (see Table 3 ), its feature functions can be", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 283, |
| "end": 290, |
| "text": "Table 3", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Features", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "f (w \u22121 w 0 ='so that', y 0 ='F', y \u22121 ='O', t).", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Features", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "The example has a value of 1 only when the words at node t \u2212 1 and t are 'so that', and the labels at node t and t \u2212 1 are 'F' and 'O' respectively. The maximum length of the y history is called the clique order of the feature (in this feature function, it is 2 since only y 0 and y \u22121 are covered). The feature templates are listed in Table 3 . w i refers to the word at the i th position relative to the current node; window size is the maximum span of words centered at the current word that the template covers, e.g., w \u22121 w 0 with a window size of 9 means w \u22124 w \u22123 , w \u22123 w \u22122 , ..., w 3 w 4 ; p i refers to the POS tag at the i th position relative to the current node; w i\u223cj refers to any word from the i th position to the j th position relative to the current node, this template can capture word pairs which can potentially indicate a repair, e.g., \"was ... is ...\", the speaker may have spoken any word(s) in between and it is very difficult for the standard n-gram features to capture all possible variations; w i, =F refers to the i th non-filler word with respect to the current position, this template can extract n-gram features skipping filler words; the multi-pair comparison function I(a, b, c, ...) indicates whether each pair (a and b, b and c, and so on) is identical, for example, if a = b = c = d, it will output \"101\" ('1' for being equal, '0' for being unequal), this feature template can capture consecutive word/POS repetitions which can further improve upon the standard repetition features; and ngram-score features are the natural logarithm of the following 8 probabilities:", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 336, |
| "end": 343, |
| "text": "Table 3", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Features", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "P (w \u22123 , w \u22122 , w \u22121 , w 0 ), P (w 0 |w \u22123 , w \u22122 , w \u22121 ), P (w \u22123 , w \u22122 , w \u22121 ), P ( /s |w \u22123 , w \u22122 , w \u22121 ), P (w \u22123 ), P (w \u22122 ), P (w \u22121 ) and P (w 0 ) (where \" /s \" denotes sentence-end). Feature Template Window Size Clique Order w 0 9 1 w \u22121 w 0 9 2 w \u22122 w \u22121 w 0 9 2 p 0 9 1 p \u22121 p 0 9 2 p \u22122 p \u22121 p 0 9 2 w 0 w \u22126\u223c\u22121 , w 0 w 1\u223c6 1 1 I(w i , w j ) 21 2 I(w i , w j , w i+1 , w j+1 ) 21 2 I(w i , w j )(w i if w i =w j ) 21 2 I(p i , p j ) 21 3 I(p i , p j , p i+1 , p j+1 ) 21 3 I(p i , p j )(p i if p i =p j ) 21 3 p \u22121 w 0 5 2 w \u22121 p 0 5 2 w \u22122, =F w \u22121, =F 1 2 w \u22123, =F w \u22122, =F w \u22121, =F 1 2 p \u22122, =F p \u22121, =F 1 2 p \u22123, =F p \u22122, =F p \u22121, =F", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Features", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "1 2 ngram-score features 1 3 pause duration before w 0 1 3 pause duration after w 0 1 3 transitions 1 3 Table 3 : Feature templates for disfluency prediction, or punctuation prediction, or joint prediction for all the experiments in this paper.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 104, |
| "end": 111, |
| "text": "Table 3", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Features", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "The performance of the system can be further improved by adding additional prosodic features (Savova and Bachenko, 2003; Christensen et al., 2001 ) apart from pause durations. However, since in this work we focus on model-level comparison, we do not use other prosodic features for simplicity. We use the standard F1 score as our evaluation metric and this is similar to that in Qian and Liu (2013) . For training, we set the frequency pruning threshold to 5 to control the number of parameters. The regularization parameter is tuned on the development set. Since the toolkits used to run different experiments have slightly different limitations, in order to make fair comparisons across different toolkits, we do not use weighting to balance precision and recall when training M3N and we have reduced the clique order of transition features to two and all the other features to one in some of our experiments. Since the performance of filler word prediction on this dataset is already very high, (>97%), we only focus on the F1 score of edit word prediction in this paper when reporting the performance of disfluency prediction. Table 4 shows our baseline results. Our preliminary study shows the following gen-eral trends: (i) for disfluency prediction: joining utterances into long sentences will cause a 5-6% drop in F1 score; removing precision/recall balance in M3N will cause about 1% drop in F1 score; and reducing the clique order in Table 3 will cause about 1-2% drop in F1 score; and (ii) for punctuation prediction: removing precision/recall balance in M3N will cause negligible drop in F1 score; and reducing clique order will cause about 2-3% drop in F1 score. Conventionally, the degradation from reducing the clique orders can be mostly compensated by using the BIES (Begin, Inside, End, and Single) labeling scheme. In this work, for consistency and comparability across various experiments, we will stick to the same set of labels in Table 2 .", |
| "cite_spans": [ |
| { |
| "start": 93, |
| "end": 120, |
| "text": "(Savova and Bachenko, 2003;", |
| "ref_id": "BIBREF14" |
| }, |
| { |
| "start": 121, |
| "end": 145, |
| "text": "Christensen et al., 2001", |
| "ref_id": "BIBREF1" |
| }, |
| { |
| "start": 379, |
| "end": 398, |
| "text": "Qian and Liu (2013)", |
| "ref_id": "BIBREF13" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 1131, |
| "end": 1138, |
| "text": "Table 4", |
| "ref_id": "TABREF2" |
| }, |
| { |
| "start": 1444, |
| "end": 1451, |
| "text": "Table 3", |
| "ref_id": null |
| }, |
| { |
| "start": 1953, |
| "end": 1960, |
| "text": "Table 2", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Features", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "Instead of decomposing the joint prediction of punctuation and disfluency into two independent tasks, the cascade approach considers one task to be conditionally dependent on the other task such that the predictions are performed in sequence, where the results from the first step is used in the second step. In this paper, we compare two types of cascade: hard cascade versus soft cascade.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "The Cascade Approach", |
| "sec_num": "4" |
| }, |
| { |
| "text": "For the hard cascade, we use the output from the first step to modify the input sequence before extracting features for the second step. For PU\u2192DF (PUnctuation prediction followed by DisFluency prediction), we split the input sequence into sentences according to the sentence-end punctuation symbols predicted by the first step, and then perform the DF prediction on the short/sentence-split sequences in the second step. For DF\u2192PU, we remove the edit and filler words predicted by the first step, and then predict the punctuations using the cleaned-up input sequence. The hard cascade method may be helpful because the disfluency prediction on short/sentence-split sequences is better than on long/sentence-joined sequences (see the second and third rows in Table 4 ). On the other hand, the punctuation prediction on fluent text is more accurate than that on non-fluent text based on our preliminary study.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 759, |
| "end": 766, |
| "text": "Table 4", |
| "ref_id": "TABREF2" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Hard Cascade", |
| "sec_num": "4.1" |
| }, |
| { |
| "text": "For this experiment, four models are trained using M3N without balancing precision/recall. For the first step, two models are trained on long/sentence-joined sequences with disfluent to-kens -one for PU prediction and the other for DF prediction. These are simply the isolated baseline systems. For the second step, the DF prediction model is trained on the short/sentence-split sequences with disfluent tokens while the PU prediction model is trained on the long/sentence-joined sequences with disfluent tokens removed. Note that in the second step of DF\u2192PU, punctuation labels are predicted only for the fluent tokens since the disfluent tokens predicted by the first step has already been removed. Therefore, during evaluation, if the first step makes a false positive by predicting a fluent token as an edit or filler, we set its punctuation label to the neutral label, None. All the four models are trained using the same feature templates as shown in Table 3 . The regularization parameter is tuned on the development set.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 957, |
| "end": 964, |
| "text": "Table 3", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Hard Cascade", |
| "sec_num": "4.1" |
| }, |
| { |
| "text": "For the soft cascade method, we use the labels predicted from the first step as additional features for the second step. For PU\u2192DF, we model the joint probability as:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Soft Cascade", |
| "sec_num": "4.2" |
| }, |
| { |
| "text": "P (DF, PU|x) = P (PU|x) \u00d7 P (DF|PU, x) (1)", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Soft Cascade", |
| "sec_num": "4.2" |
| }, |
| { |
| "text": "Likewise, we model the joint probability for DF\u2192PU as:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Soft Cascade", |
| "sec_num": "4.2" |
| }, |
| { |
| "text": "P (DF, PU|x) = P (DF|x) \u00d7 P (PU|DF, x) (2)", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Soft Cascade", |
| "sec_num": "4.2" |
| }, |
| { |
| "text": "For this experiment, four models are trained using M3N without balancing precision/recall. As with the case of hard cascade, the two models used in the first step are simply the isolated baseline systems. For the second step, in addition to the feature templates in Table 3 , we also pass on the labels (at the previous, current and next position) predicted by the first step as three third-orderclique features. We also tune the regularization parameter on the development set to obtain the best model. Table 5 compares the performance of the hard and soft cascade methods with the isolated baseline systems. In addition, we have also included the results of using the true labels in place of the labels predicted by the first step to indicate the upper-bound performance of the cascade approaches. The results show that both the hard and soft cascade methods outperform the baseline systems, with the latter giving a better performance 72.1 82.7 Table 5 : Performance comparison between the hard cascade method and the soft cascade method with respect to the baseline isolated prediction. All models are trained using M3N without balancing precision and recall.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 266, |
| "end": 273, |
| "text": "Table 3", |
| "ref_id": null |
| }, |
| { |
| "start": 504, |
| "end": 511, |
| "text": "Table 5", |
| "ref_id": null |
| }, |
| { |
| "start": 948, |
| "end": 955, |
| "text": "Table 5", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Soft Cascade", |
| "sec_num": "4.2" |
| }, |
| { |
| "text": "(statistical significance at p=0.01). However, hard cascade has a higher upper-bound than soft cascade. This observation can be explained as follows. For hard cascade, the input sequence is modified prior to feature extraction. Therefore, many of the features generated by the feature templates given in Table 3 will be affected by these modifications. So, provided that the modifications are based on the correct information, the resulting features will not contain unwanted artefacts caused by the absence of the sentence boundary information for the presence of disfluencies. For example, in \"do you do you feel that it was worthy\", the punctuation prediction system tends to insert a sentence-end punctuation after the first \"do you\" because the speaker restarts the sentence.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 304, |
| "end": 311, |
| "text": "Table 3", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Experimental Results", |
| "sec_num": "4.3" |
| }, |
| { |
| "text": "If the disfluency was correctly predicted in the first step, then the hard cascade method would have removed the first \"do you\" and eliminated the confusion. Similarly, in \"I 'm sorry . I 'm not going with you tomorrow . \", the first \"I 'm\" is likely to be incorrectly detected as disfluent tokens since consecutive repetitions are a strong indication of disfluency. In the case of hard cascade, PU\u2192DF, the input sequence would have been split into sentences and the repetition feature would not be activated. However, since the hard cascade method has a greater influence on the features for the second step, it is also more sensitive to the prediction errors from the first step.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Experimental Results", |
| "sec_num": "4.3" |
| }, |
| { |
| "text": "Another observation from Table 5 is that the improvement of the soft cascade over the isolate baseline is much larger on DF (1.4% absolute) than that on PU (only 0.5% absolute). The same holds true for the hard cascade, despite the fact that there are more DF labels than PU labels in this corpus (see Table 1 ) and the first step prediction is more accurate on DF than on PU. This suggests that their mutual influence is not symmetrical, in the way that the output from punctuation prediction provides more useful information for disfluency prediction than the other way round.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 25, |
| "end": 32, |
| "text": "Table 5", |
| "ref_id": null |
| }, |
| { |
| "start": 302, |
| "end": 309, |
| "text": "Table 1", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Experimental Results", |
| "sec_num": "4.3" |
| }, |
| { |
| "text": "In Section 4, we have described that the two tasks can be cascaded in either order, i.e., PU\u2192DF and DF\u2192PU. However, the performance of the second step greatly depends on that of the first step. In order to reduce sensitivity to the errors made in the first step, one simple approach is to propagate multiple hypotheses from the first step to the second step to obtain a list of joint hypotheses (with both the DF and PU labels). We then rerank these hypotheses based on the joint probability and pick the best. We call this the rescoring approach. From (1) and (2), the joint probabilities can be expressed in terms of the probabilities generated by four models: P (PU|x), P (DF|PU, x), P (DF|x), and P (PU|DF, x). We can combine the four models to form the following joint probability function for rescoring:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "The Rescoring Approach", |
| "sec_num": "5" |
| }, |
| { |
| "text": "P (DF, PU|x) = P (DF|x) \u03b1 1 \u00d7 P (PU|DF, x) \u03b1 2 \u00d7 P (PU|x) \u03b2 1 \u00d7 P (DF|PU, x) \u03b2 2", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "The Rescoring Approach", |
| "sec_num": "5" |
| }, |
| { |
| "text": "where \u03b1 1 , \u03b1 2 , \u03b2 1 , and \u03b2 2 are used to weight the relative importance between (1) and (2); and between the first and second steps. In practice, the probabilities are computed in the log domain where the above expression becomes a weighted sum of the log probabilities. A similar rescoring approach using two models is described in Shi and Wang (2007) .", |
| "cite_spans": [ |
| { |
| "start": 336, |
| "end": 355, |
| "text": "Shi and Wang (2007)", |
| "ref_id": "BIBREF15" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "The Rescoring Approach", |
| "sec_num": "5" |
| }, |
| { |
| "text": "The experimental framework is shown in Figure 1. For PU\u2192DF, we first use P (PU|x) to generate an n-best list. Then, for each hypothesis in the n-best list, we use P (DF|PU, x) to obtain another n-best list. So we have n 2 -best joint hypotheses. We do the same for DF\u2192PU to obtain another n 2 -best joint hypotheses. We rescore the 2n 2 -best list using the four models. The four weights \u03b1 1 , \u03b1 2 , \u03b2 1 , and \u03b2 2 are tuned to optimize the overall F1 score on the development set. We used the MERT (minimum-error-rate training, (Och, 2003) ) algorithm to tune the weights. We also vary the size of n.", |
| "cite_spans": [ |
| { |
| "start": 528, |
| "end": 539, |
| "text": "(Och, 2003)", |
| "ref_id": "BIBREF12" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 39, |
| "end": 45, |
| "text": "Figure", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "The Rescoring Approach", |
| "sec_num": "5" |
| }, |
| { |
| "text": "P(PU|x) P(DF|PU,x) Input Sequence P(PU|DF,x) P(DF|x) PU-hypo-1 PU-hypo-n \u2026 DF-hypo-1 DF-hypo-n \u2026 2 n-best joint-hypo-1 \u2026 \u2026 joint-hypo-1 2 n 2 -best joint-hypo-2 joint-hypo-2 \u2026 \u2026", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "The Rescoring Approach", |
| "sec_num": "5" |
| }, |
| { |
| "text": "Rescore using:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "The Rescoring Approach", |
| "sec_num": "5" |
| }, |
| { |
| "text": "1 \u2022 log PU|x + 2 \u2022 log DF|PU, x + 1 \u2022 log DF|x + 2 \u2022 log PU|DF, x", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "The Rescoring Approach", |
| "sec_num": "5" |
| }, |
| { |
| "text": "Figure 1: Illustration of the rescoring pipeline framework using the four M3N models used in the softcascade method: P (PU|x), P (DF|PU, x), P (DF|x) and P (PU|DF, x)", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "The Rescoring Approach", |
| "sec_num": "5" |
| }, |
| { |
| "text": "The results shown in Table 6 suggest that the rescoring method does not improve over the softcascade baseline. This can be due to the fact that we are using the same four models for the softcascade and the rescoring methods. It may be possible that the information contained in the two models for the soft-cascade PU\u2192DF mostly overlaps with the information contained in the other two models for the soft-cascade DF\u2192PU since all the four models are trained using the same features. Thus, no additional information is gained by combining the four models.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 21, |
| "end": 28, |
| "text": "Table 6", |
| "ref_id": "TABREF5" |
| } |
| ], |
| "eq_spans": [], |
| "section": "The Rescoring Approach", |
| "sec_num": "5" |
| }, |
| { |
| "text": "In this section, we compare 2-layer FCRF (Lu and Ng, 2010) with mixed-label LCRF and cross-product LCRF on the joint prediction task. For the 2-layer FCRF, we use punctuation labels for the first layer and disfluency labels for the second layer (see Table 2 ). For the mixedlabel LCRF, we split the neutral label {O} into {Comma, Period, QMark, None} so that we have six labels in total, {E, F, Comma, Period, QMark, None}. In this approach, disfluent tokens do not have punctuation labels because in real applications, if we just want to get the cleaned-up/fluent text with punctuations, we do not need to predict punctuations on disfluent tokens as they will be removed during the clean-up process. Since this approach does not predict punctuation labels on disfluent tokens, its punctuation F1 score is only evaluated on those fluent tokens. For the crossproduct LCRF, we compose each of the three disfluency labels with the four punctuation labels to get 12 PU-DF-joint labels (Ng and Low, 2004) . Figure 2 shows a comparison of these three models in the joint prediction of punctuation and disfluency. All the LCRF and FCRF models are trained using the GRMM toolkit (Sutton, 2006) . We use the same feature templates (Table 3) to generate all the features for the toolkit. However, to reduce the training time, we have set clique order to 2 for the transitions and 1 for all other features. We tune the Gaussian prior variance on the development set for all the experiments to obtain the best model for testing. Table 7 shows the comparison of results. On DF alone, the improvement of the cross-product LCRF over the mixed-label LCRF, and the improvement of the mixed-label LCRF over the isolated baseline are not statistically significant. However, if we test the statistical significance on the overall performance of both PU and DF, both the 2-layer FCRF and the cross-product LCRF perform better than the mixed-label LCRF. 
And we also obtain the same conclusion that the mixed-label LCRF performs better than isolated prediction. However, for the comparison between the 2-layer FCRF and the cross-product LCRF, although the 2-layer FCRF performs better than the cross-product LCRF on disfluency prediction, it does worse on punctuation prediction. Overall, the two methods perform about the same; their difference is not statistically significant. In addition, both the 2-layer FCRF and the cross-product LCRF slightly outperform the soft cascade method (statistical significance at p=0.04). ", |
| "cite_spans": [ |
| { |
| "start": 41, |
| "end": 58, |
| "text": "(Lu and Ng, 2010)", |
| "ref_id": "BIBREF9" |
| }, |
| { |
| "start": 981, |
| "end": 999, |
| "text": "(Ng and Low, 2004)", |
| "ref_id": "BIBREF11" |
| }, |
| { |
| "start": 1171, |
| "end": 1185, |
| "text": "(Sutton, 2006)", |
| "ref_id": "BIBREF19" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 250, |
| "end": 257, |
| "text": "Table 2", |
| "ref_id": null |
| }, |
| { |
| "start": 1002, |
| "end": 1010, |
| "text": "Figure 2", |
| "ref_id": null |
| }, |
| { |
| "start": 1222, |
| "end": 1231, |
| "text": "(Table 3)", |
| "ref_id": null |
| }, |
| { |
| "start": 1517, |
| "end": 1524, |
| "text": "Table 7", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "The Joint Approach", |
| "sec_num": "6" |
| }, |
| { |
| "text": "In this section, we will summarise our observations based on the empirical studies that we have conducted in this paper. Firstly, punctuation prediction and disfluency prediction do influence each other. The output from one task does provide useful information that can improve the other task. All the approaches studied in this work, which link the two tasks together, perform better than their corresponding Experiment F1 for PU F1 for DF isolated baseline 68.7 77.0 soft cascade 69.0 77.5 mixed-label LCRF 69.0 77.2 cross-product LCRF 69.9 77.3 2-layer FCRF 69.2 77.8 Table 7 : Performance comparison among 2layer FCRF, mixed-label LCRF and cross-product LCRF, with respect to the soft-cascade and the isolated prediction baseline. All models are trained using GRMM (Sutton, 2006) , with reduced clique orders. ", |
| "cite_spans": [ |
| { |
| "start": 769, |
| "end": 783, |
| "text": "(Sutton, 2006)", |
| "ref_id": "BIBREF19" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 571, |
| "end": 578, |
| "text": "Table 7", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Discussion", |
| "sec_num": "7" |
| }, |
| { |
| "text": "x 1 x 2 x 9 x 3 x 4 x 5 x 6 x 7 x 8 x 1 x 2 x 9 x 3 x 4 x 5 x 6 x 7 x 8 x 1 x 2 x 9 x 3 x 4 x 5 x 6 x 7 x 8", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Discussion", |
| "sec_num": "7" |
| }, |
| { |
| "text": "Figure 2: Illustration using (a) mixed-label LCRF; (b) cross-product LCRF; and (c) 2-layer FCRF, for joint punctuation (PU) and disfluency (DF) prediction. Shaded nodes are observations and unshaded nodes are variables to be predicted. isolated prediction baseline. Secondly, as compared to the soft cascade, the hard cascade passes more information from the first step into the second step, and thus is much more sensitive to errors in the first step. In practice, unless the first step has very high accuracy, soft cascade is expected to do better than hard cascade.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Discussion", |
| "sec_num": "7" |
| }, |
| { |
| "text": "Thirdly, if we train a model using a fine-grained label set but test it on the same coarse-grained label set, we are very likely to get improvement. For example:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Discussion", |
| "sec_num": "7" |
| }, |
| { |
| "text": "\u2022 The edit word F1 for mixed edit and filler prediction using {E, F, O} is better than that for edit prediction using {E, O} (see the second and third rows in Table 4 ). This is because the former actually splits the O in the latter into F and O. Thus, it has a finer label granularity.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 159, |
| "end": 166, |
| "text": "Table 4", |
| "ref_id": "TABREF2" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Discussion", |
| "sec_num": "7" |
| }, |
| { |
| "text": "\u2022 Disfluency prediction using mixed-label LCRF (using label set {E, F, Comma, Period, Question, None}) performs better than that using isolated LCRF (using label set {E, F, O}) (see the second and fourth rows in Table 7 ). This is because the former dis-tinguishes between different punctuations for fluent tokens and thus has a finer label granularity.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 212, |
| "end": 219, |
| "text": "Table 7", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Discussion", |
| "sec_num": "7" |
| }, |
| { |
| "text": "\u2022 Both the cross-product LCRF and 2-layer FCRF perform better than mixed-label LCRF because the former two distinguish between different punctuations for edit, filler and fluent tokens while the latter distinguishes between different punctuations only for fluent tokens. Thus, the former has a much finer label granularity.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Discussion", |
| "sec_num": "7" |
| }, |
| { |
| "text": "From the above comparisons, we can see that increasing the label granularity can greatly improve the accuracy of a model. However, this may also increase the model complexity dramatically, especially when higher clique order is used. Although the joint approach (2-layer FCRF and cross-product LCRF) are better than the softcascade approach, they cannot be easily scaled up to using higher order cliques, which greatly limits their potential. In practice, the soft cascade approach offers a simpler and more efficient way to achieve a joint prediction of punctuations and disfluencies.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Discussion", |
| "sec_num": "7" |
| }, |
| { |
| "text": "In general, punctuation prediction and disfluency prediction can improve downstream NLP tasks. Combining the two tasks can potentially improve the efficiency of the overall framework and minimize error propagation. In this work, we have carried out an empirical study on the various methods for combining the two tasks. Our results show that the various methods linking the two tasks perform better than the isolated prediction. This means that punctuation prediction and disfluency prediction do influence each other, and the prediction outcome in one task can provide useful information that helps to improve the other task. Specifically, we compare the cascade models and the joint prediction models. For the cascade approach, we show that soft cascade is less sensitive to prediction errors in the first step, and thus performs better than hard cascade. For joint model approach, we show that, when clique order of one is used, all the three joint model approaches perform significantly better than the isolated prediction baseline. Moreover, the 2-layer FCRF and the cross-product LCRF perform slightly better than the mix-label LCRF and the soft-cascade approach, suggesting that modelling at a finer label granularity is potentially beneficial. However, the soft cascade approach is more efficient than the joint approach when a higher clique order is used.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Conclusion", |
| "sec_num": "8" |
| }, |
| { |
| "text": "The research reported in this paper was carried out as part of the PhD thesis research of Xuancong Wang at the NUS Graduate School for Integrated Sciences and Engineering.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| } |
| ], |
| "back_matter": [ |
| { |
| "text": "This research is supported by the Singapore National Research Foundation under its International Research Centre @ Singapore Funding Initiative and administered by the IDM Programme Office.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Acknowledgments", |
| "sec_num": null |
| } |
| ], |
| "bib_entries": { |
| "BIBREF0": { |
| "ref_id": "b0", |
| "title": "Automatic punctuation and disfluency detection in multi-party meetings using prosodic and lexical cues", |
| "authors": [ |
| { |
| "first": "Don", |
| "middle": [], |
| "last": "Baron", |
| "suffix": "" |
| }, |
| { |
| "first": "Elizabeth", |
| "middle": [], |
| "last": "Shriberg", |
| "suffix": "" |
| }, |
| { |
| "first": "Andreas", |
| "middle": [], |
| "last": "Stolcke", |
| "suffix": "" |
| } |
| ], |
| "year": 2002, |
| "venue": "Proc. of ICSLP", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Don Baron, Elizabeth Shriberg, and Andreas Stolcke. 2002. Automatic punctuation and disfluency detec- tion in multi-party meetings using prosodic and lex- ical cues. In Proc. of ICSLP.", |
| "links": null |
| }, |
| "BIBREF1": { |
| "ref_id": "b1", |
| "title": "Punctuation annotation using statistical prosody models", |
| "authors": [ |
| { |
| "first": "Heidi", |
| "middle": [], |
| "last": "Christensen", |
| "suffix": "" |
| }, |
| { |
| "first": "Yoshihiko", |
| "middle": [], |
| "last": "Gotoh", |
| "suffix": "" |
| }, |
| { |
| "first": "Steve", |
| "middle": [], |
| "last": "Renals", |
| "suffix": "" |
| } |
| ], |
| "year": 2001, |
| "venue": "ISCA Tutorial and Research Workshop (ITRW) on Prosody in Speech Recognition and Understanding", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Heidi Christensen, Yoshihiko Gotoh, and Steve Re- nals. 2001. Punctuation annotation using statisti- cal prosody models. In ISCA Tutorial and Research Workshop (ITRW) on Prosody in Speech Recognition and Understanding.", |
| "links": null |
| }, |
| "BIBREF2": { |
| "ref_id": "b2", |
| "title": "Punctuating speech for information extraction", |
| "authors": [ |
| { |
| "first": "Ralph", |
| "middle": [], |
| "last": "Benoit Favre", |
| "suffix": "" |
| }, |
| { |
| "first": "Dustin", |
| "middle": [], |
| "last": "Grishman", |
| "suffix": "" |
| }, |
| { |
| "first": "Heng", |
| "middle": [], |
| "last": "Hillard", |
| "suffix": "" |
| }, |
| { |
| "first": "Dilek", |
| "middle": [], |
| "last": "Ji", |
| "suffix": "" |
| }, |
| { |
| "first": "Mari", |
| "middle": [], |
| "last": "Hakkani-Tur", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Ostendorf", |
| "suffix": "" |
| } |
| ], |
| "year": 2008, |
| "venue": "Proc. of ICASSP", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Benoit Favre, Ralph Grishman, Dustin Hillard, Heng Ji, Dilek Hakkani-Tur, and Mari Ostendorf. 2008. Punctuating speech for information extraction. In Proc. of ICASSP.", |
| "links": null |
| }, |
| "BIBREF3": { |
| "ref_id": "b3", |
| "title": "Using integer linear programming for detecting speech disfluencies", |
| "authors": [ |
| { |
| "first": "Kallirroi", |
| "middle": [], |
| "last": "Georgila", |
| "suffix": "" |
| } |
| ], |
| "year": 2009, |
| "venue": "Proc. of NAACL", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Kallirroi Georgila. 2009. Using integer linear pro- gramming for detecting speech disfluencies. In Proc. of NAACL.", |
| "links": null |
| }, |
| "BIBREF4": { |
| "ref_id": "b4", |
| "title": "Maximum entropy model for punctuation annotation from speech", |
| "authors": [ |
| { |
| "first": "Jing", |
| "middle": [], |
| "last": "Huang", |
| "suffix": "" |
| }, |
| { |
| "first": "Geoffrey", |
| "middle": [], |
| "last": "Zweig", |
| "suffix": "" |
| } |
| ], |
| "year": 2002, |
| "venue": "Proc. of INTERSPEECH", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Jing Huang and Geoffrey Zweig. 2002. Maximum en- tropy model for punctuation annotation from speech. In Proc. of INTERSPEECH.", |
| "links": null |
| }, |
| "BIBREF5": { |
| "ref_id": "b5", |
| "title": "A TAGbased noisy-channel model of speech repairs", |
| "authors": [ |
| { |
| "first": "Mark", |
| "middle": [], |
| "last": "Johnson", |
| "suffix": "" |
| }, |
| { |
| "first": "Eugene", |
| "middle": [], |
| "last": "Charniak", |
| "suffix": "" |
| } |
| ], |
| "year": 2004, |
| "venue": "Proc. of ACL", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Mark Johnson and Eugene Charniak. 2004. A TAG- based noisy-channel model of speech repairs. In Proc. of ACL.", |
| "links": null |
| }, |
| "BIBREF6": { |
| "ref_id": "b6", |
| "title": "Automatic detection of sentence boundaries, disfluencies, and conversational fillers in spontaneous speech", |
| "authors": [ |
| { |
| "first": "Joungbum", |
| "middle": [], |
| "last": "Kim", |
| "suffix": "" |
| } |
| ], |
| "year": 2004, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Joungbum Kim. 2004. Automatic detection of sen- tence boundaries, disfluencies, and conversational fillers in spontaneous speech. Master dissertation of University of Washington.", |
| "links": null |
| }, |
| "BIBREF7": { |
| "ref_id": "b7", |
| "title": "Using conditional random fields for sentence boundary detection in speech", |
| "authors": [ |
| { |
| "first": "Yang", |
| "middle": [], |
| "last": "Liu", |
| "suffix": "" |
| }, |
| { |
| "first": "Andreas", |
| "middle": [], |
| "last": "Stolcke", |
| "suffix": "" |
| }, |
| { |
| "first": "Elizabeth", |
| "middle": [], |
| "last": "Shriberg", |
| "suffix": "" |
| }, |
| { |
| "first": "Mary", |
| "middle": [], |
| "last": "Harper", |
| "suffix": "" |
| } |
| ], |
| "year": 2005, |
| "venue": "Proc. of ACL", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Yang Liu, Andreas Stolcke, Elizabeth Shriberg, and Mary Harper. 2005. Using conditional random fields for sentence boundary detection in speech. In Proc. of ACL.", |
| "links": null |
| }, |
| "BIBREF8": { |
| "ref_id": "b8", |
| "title": "Enriching speech recognition with automatic detection of sentence boundaries and disfluencies", |
| "authors": [ |
| { |
| "first": "Yang", |
| "middle": [], |
| "last": "Liu", |
| "suffix": "" |
| }, |
| { |
| "first": "Elizabeth", |
| "middle": [], |
| "last": "Shriberg", |
| "suffix": "" |
| }, |
| { |
| "first": "Andreas", |
| "middle": [], |
| "last": "Stolcke", |
| "suffix": "" |
| }, |
| { |
| "first": "Dustin", |
| "middle": [], |
| "last": "Hillard", |
| "suffix": "" |
| }, |
| { |
| "first": "Mari", |
| "middle": [], |
| "last": "Ostendorf", |
| "suffix": "" |
| }, |
| { |
| "first": "Mary", |
| "middle": [], |
| "last": "Harper", |
| "suffix": "" |
| } |
| ], |
| "year": 2006, |
| "venue": "IEEE Transactions on Audio, Speech, and Language Processing", |
| "volume": "14", |
| "issue": "5", |
| "pages": "1526--1540", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Yang Liu, Elizabeth Shriberg, Andreas Stolcke, Dustin Hillard, Mari Ostendorf, and Mary Harper. 2006. Enriching speech recognition with automatic detec- tion of sentence boundaries and disfluencies. IEEE Transactions on Audio, Speech, and Language Pro- cessing, 14(5):1526-1540.", |
| "links": null |
| }, |
| "BIBREF9": { |
| "ref_id": "b9", |
| "title": "Better punctuation prediction with dynamic conditional random fields", |
| "authors": [ |
| { |
| "first": "Wei", |
| "middle": [], |
| "last": "Lu", |
| "suffix": "" |
| }, |
| { |
| "first": "Hwee Tou", |
| "middle": [], |
| "last": "Ng", |
| "suffix": "" |
| } |
| ], |
| "year": 2010, |
| "venue": "Proc. of EMNLP", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Wei Lu and Hwee Tou Ng. 2010. Better punctuation prediction with dynamic conditional random fields. In Proc. of EMNLP.", |
| "links": null |
| }, |
| "BIBREF10": { |
| "ref_id": "b10", |
| "title": "A phrase-level machine translation approach for disfluency detection using weighted finite state transducers", |
| "authors": [ |
| { |
| "first": "Sameer", |
| "middle": [], |
| "last": "Maskey", |
| "suffix": "" |
| }, |
| { |
| "first": "Bowen", |
| "middle": [], |
| "last": "Zhou", |
| "suffix": "" |
| }, |
| { |
| "first": "Yuqing", |
| "middle": [], |
| "last": "Gao", |
| "suffix": "" |
| } |
| ], |
| "year": 2006, |
| "venue": "Proc. of INTERSPEECH", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Sameer Maskey, Bowen Zhou, and Yuqing Gao. 2006. A phrase-level machine translation approach for dis- fluency detection using weighted finite state trans- ducers. In Proc. of INTERSPEECH.", |
| "links": null |
| }, |
| "BIBREF11": { |
| "ref_id": "b11", |
| "title": "Chinese part-of-speech tagging: One-at-a-time or all-atonce? Word-based or character-based?", |
| "authors": [ |
| { |
| "first": "Tou", |
| "middle": [], |
| "last": "Hwee", |
| "suffix": "" |
| }, |
| { |
| "first": "Jin", |
| "middle": [ |
| "Kiat" |
| ], |
| "last": "Ng", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Low", |
| "suffix": "" |
| } |
| ], |
| "year": 2004, |
| "venue": "Proc. of EMNLP", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Hwee Tou Ng and Jin Kiat Low. 2004. Chi- nese part-of-speech tagging: One-at-a-time or all-at- once? Word-based or character-based? In Proc. of EMNLP.", |
| "links": null |
| }, |
| "BIBREF12": { |
| "ref_id": "b12", |
| "title": "Minimum error rate training in statistical machine translation", |
| "authors": [ |
| { |
| "first": "Franz Josef", |
| "middle": [], |
| "last": "Och", |
| "suffix": "" |
| } |
| ], |
| "year": 2003, |
| "venue": "Proc. of ACL", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Franz Josef Och. 2003. Minimum error rate training in statistical machine translation. In Proc. of ACL.", |
| "links": null |
| }, |
| "BIBREF13": { |
| "ref_id": "b13", |
| "title": "Disfluency detection using multi-step stacked learning", |
| "authors": [ |
| { |
| "first": "Xian", |
| "middle": [], |
| "last": "Qian", |
| "suffix": "" |
| }, |
| { |
| "first": "Yang", |
| "middle": [], |
| "last": "Liu", |
| "suffix": "" |
| } |
| ], |
| "year": 2013, |
| "venue": "Proc. of NAACL", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Xian Qian and Yang Liu. 2013. Disfluency detec- tion using multi-step stacked learning. In Proc. of NAACL.", |
| "links": null |
| }, |
| "BIBREF14": { |
| "ref_id": "b14", |
| "title": "Prosodic features of four types of disfluencies", |
| "authors": [ |
| { |
| "first": "Guergana", |
| "middle": [], |
| "last": "Savova", |
| "suffix": "" |
| }, |
| { |
| "first": "Joan", |
| "middle": [], |
| "last": "Bachenko", |
| "suffix": "" |
| } |
| ], |
| "year": 2003, |
| "venue": "ISCA Tutorial and Research Workshop on Disfluency in Spontaneous Speech", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Guergana Savova, Joan Bachenko. 2003. Prosodic fea- tures of four types of disfluencies. In ISCA Tuto- rial and Research Workshop on Disfluency in Spon- taneous Speech.", |
| "links": null |
| }, |
| "BIBREF15": { |
| "ref_id": "b15", |
| "title": "A dual-layer CRFs based joint decoding method for cascaded segmentation and labeling tasks", |
| "authors": [ |
| { |
| "first": "Yanxin", |
| "middle": [], |
| "last": "Shi", |
| "suffix": "" |
| }, |
| { |
| "first": "Mengqiu", |
| "middle": [], |
| "last": "Wang", |
| "suffix": "" |
| } |
| ], |
| "year": 2007, |
| "venue": "Proc. of IJCAI", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Yanxin Shi and Mengqiu Wang. 2007. A dual-layer CRFs based joint decoding method for cascaded seg- mentation and labeling tasks. In Proc. of IJCAI.", |
| "links": null |
| }, |
| "BIBREF16": { |
| "ref_id": "b16", |
| "title": "A prosody-only decision-tree model for disfluency detection", |
| "authors": [ |
| { |
| "first": "Elizabeth", |
| "middle": [], |
| "last": "Shriberg", |
| "suffix": "" |
| }, |
| { |
| "first": "Rebecca", |
| "middle": [], |
| "last": "Bates", |
| "suffix": "" |
| }, |
| { |
| "first": "Andreas", |
| "middle": [], |
| "last": "Stolcke", |
| "suffix": "" |
| } |
| ], |
| "year": 1997, |
| "venue": "Proc. of Eurospeech", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Elizabeth Shriberg, Rebecca Bates and Andreas Stol- cke. 1997. A prosody-only decision-tree model for disfluency detection. In Proc. of Eurospeech.", |
| "links": null |
| }, |
| "BIBREF17": { |
| "ref_id": "b17", |
| "title": "Can prosody aid the automatic classification of dialog acts in conversational speech?", |
| "authors": [ |
| { |
| "first": "Elizabeth", |
| "middle": [], |
| "last": "Shriberg", |
| "suffix": "" |
| }, |
| { |
| "first": "Andreas", |
| "middle": [], |
| "last": "Stolcke", |
| "suffix": "" |
| }, |
| { |
| "first": "Daniel", |
| "middle": [], |
| "last": "Jurafsky", |
| "suffix": "" |
| }, |
| { |
| "first": "Noah", |
| "middle": [], |
| "last": "Coccaro", |
| "suffix": "" |
| }, |
| { |
| "first": "Marie", |
| "middle": [], |
| "last": "Meteer", |
| "suffix": "" |
| }, |
| { |
| "first": "Rebecca", |
| "middle": [], |
| "last": "Bates", |
| "suffix": "" |
| }, |
| { |
| "first": "Paul", |
| "middle": [], |
| "last": "Taylor", |
| "suffix": "" |
| } |
| ], |
| "year": 1998, |
| "venue": "Language and speech", |
| "volume": "41", |
| "issue": "", |
| "pages": "443--492", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Elizabeth Shriberg, Andreas Stolcke, Daniel Jurafsky, Noah Coccaro, Marie Meteer, Rebecca Bates, Paul Taylor, Klaus Ries, Rachel Martin, and Carol Van Ess-Dykema. 1998. Can prosody aid the auto- matic classification of dialog acts in conversational speech? In Language and speech 41, no. 3-4: 443- 492.", |
| "links": null |
| }, |
| "BIBREF18": { |
| "ref_id": "b18", |
| "title": "Automatic detection of sentence boundaries and disfluencies based on recognized words", |
| "authors": [ |
| { |
| "first": "Andreas", |
| "middle": [], |
| "last": "Stolcke", |
| "suffix": "" |
| }, |
| { |
| "first": "Elizabeth", |
| "middle": [], |
| "last": "Shriberg", |
| "suffix": "" |
| }, |
| { |
| "first": "Rebecca", |
| "middle": [ |
| "A" |
| ], |
| "last": "Bates", |
| "suffix": "" |
| }, |
| { |
| "first": "Mari", |
| "middle": [], |
| "last": "Ostendorf", |
| "suffix": "" |
| }, |
| { |
| "first": "Dilek", |
| "middle": [], |
| "last": "Hakkani", |
| "suffix": "" |
| }, |
| { |
| "first": "Madelaine", |
| "middle": [], |
| "last": "Plauche", |
| "suffix": "" |
| }, |
| { |
| "first": "Gokhan", |
| "middle": [], |
| "last": "Tur", |
| "suffix": "" |
| }, |
| { |
| "first": "Yu", |
| "middle": [], |
| "last": "Lu", |
| "suffix": "" |
| } |
| ], |
| "year": 1998, |
| "venue": "Proc. of ICSLP", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Andreas Stolcke, Elizabeth Shriberg, Rebecca A. Bates, Mari Ostendorf, Dilek Hakkani, Madelaine Plauche, Gokhan Tur, and Yu Lu. 1998. Auto- matic detection of sentence boundaries and disfluen- cies based on recognized words. In Proc. of ICSLP.", |
| "links": null |
| }, |
| "BIBREF19": { |
| "ref_id": "b19", |
| "title": "GRMM: GRaphical Models in Mallet", |
| "authors": [ |
| { |
| "first": "Charles", |
| "middle": [], |
| "last": "Sutton", |
| "suffix": "" |
| } |
| ], |
| "year": 2006, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Charles Sutton. 2006. GRMM: GRaphical Models in Mallet. http://mallet.cs.umass.edu/grmm/", |
| "links": null |
| }, |
| "BIBREF20": { |
| "ref_id": "b20", |
| "title": "Dynamic conditional random fields: factorized probabilistic models for labeling and segmenting sequence data", |
| "authors": [ |
| { |
| "first": "Charles", |
| "middle": [], |
| "last": "Sutton", |
| "suffix": "" |
| }, |
| { |
| "first": "Andrew", |
| "middle": [], |
"last": "McCallum",
| "suffix": "" |
| }, |
| { |
| "first": "Khashayar", |
| "middle": [], |
| "last": "Rohanimanesh", |
| "suffix": "" |
| } |
| ], |
| "year": 2007, |
"venue": "Journal of Machine Learning Research",
| "volume": "8", |
| "issue": "", |
| "pages": "693--723", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Charles Sutton, Andrew McCallum, and Khashayar Rohanimanesh. 2007. Dynamic conditional random fields: factorized probabilistic models for labeling and segmenting sequence data. In Journal of Ma- chine Learning Research, 8: 693-723.", |
| "links": null |
| }, |
| "BIBREF21": { |
| "ref_id": "b21", |
| "title": "Max-margin Markov networks", |
| "authors": [ |
| { |
| "first": "Ben", |
| "middle": [], |
| "last": "Taskar", |
| "suffix": "" |
| }, |
| { |
| "first": "Carlos", |
| "middle": [], |
| "last": "Guestrin", |
| "suffix": "" |
| }, |
| { |
| "first": "Daphne", |
| "middle": [], |
| "last": "Koller", |
| "suffix": "" |
| } |
| ], |
| "year": 2004, |
| "venue": "Proc. of NIPS", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Ben Taskar, Carlos Guestrin, and Daphne Koller. 2004. Max-margin Markov networks. In Proc. of NIPS.", |
| "links": null |
| }, |
| "BIBREF22": { |
| "ref_id": "b22", |
| "title": "Automatic disfluency removal for improving spoken language translation", |
| "authors": [ |
| { |
| "first": "Wen", |
| "middle": [], |
| "last": "Wang", |
| "suffix": "" |
| }, |
| { |
| "first": "Gokhan", |
| "middle": [], |
| "last": "Tur", |
| "suffix": "" |
| } |
| ], |
| "year": 2010, |
| "venue": "Proc. of ICASSP", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Wen Wang, Gokhan Tur, Jing Zheng, and Necip Fazil Ayan. 2010. Automatic disfluency removal for im- proving spoken language translation. In Proc. of ICASSP.", |
| "links": null |
| }, |
| "BIBREF23": { |
| "ref_id": "b23", |
| "title": "Dynamic conditional random fields for joint sentence boundary and punctuation prediction", |
| "authors": [ |
| { |
| "first": "Xuancong", |
| "middle": [], |
| "last": "Wang", |
| "suffix": "" |
| }, |
| { |
| "first": "Khe Chai", |
| "middle": [], |
| "last": "Hwee Tou Ng", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Sim", |
| "suffix": "" |
| } |
| ], |
| "year": 2012, |
| "venue": "Proc. of Interspeech", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Xuancong Wang, Hwee Tou Ng, and Khe Chai Sim. 2012. Dynamic conditional random fields for joint sentence boundary and punctuation prediction. In Proc. of Interspeech.", |
| "links": null |
| }, |
| "BIBREF24": { |
| "ref_id": "b24", |
| "title": "A beam-search decoder for disfluency detection", |
| "authors": [ |
| { |
| "first": "Xuancong", |
| "middle": [], |
| "last": "Wang", |
| "suffix": "" |
| }, |
| { |
| "first": "Khe Chai", |
| "middle": [], |
| "last": "Hwee Tou Ng", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Sim", |
| "suffix": "" |
| } |
| ], |
| "year": 2014, |
| "venue": "Proc. of COLING", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Xuancong Wang, Hwee Tou Ng, and Khe Chai Sim. 2014. A beam-search decoder for disfluency detec- tion. In Proc. of COLING.", |
| "links": null |
| }, |
| "BIBREF25": { |
| "ref_id": "b25", |
| "title": "Punctuation prediction with transitionbased parsing", |
| "authors": [ |
| { |
| "first": "Dongdong", |
| "middle": [], |
| "last": "Zhang", |
| "suffix": "" |
| }, |
| { |
| "first": "Shuangzhi", |
| "middle": [], |
| "last": "Wu", |
| "suffix": "" |
| }, |
| { |
| "first": "Nan", |
| "middle": [], |
| "last": "Yang", |
| "suffix": "" |
| }, |
| { |
| "first": "Mu", |
| "middle": [], |
| "last": "Li", |
| "suffix": "" |
| } |
| ], |
| "year": 2013, |
| "venue": "Proc. of ACL", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Dongdong Zhang, Shuangzhi Wu, Nan Yang, and Mu Li. 2013. Punctuation prediction with transition- based parsing. In Proc. of ACL.", |
| "links": null |
| }, |
| "BIBREF26": { |
| "ref_id": "b26", |
| "title": "The impact of language models and loss functions on repair disfluency detection", |
| "authors": [ |
| { |
| "first": "Simon", |
| "middle": [], |
| "last": "Zwarts", |
| "suffix": "" |
| }, |
| { |
| "first": "Mark", |
| "middle": [], |
| "last": "Johnson", |
| "suffix": "" |
| } |
| ], |
| "year": 2011, |
| "venue": "Proc. of ACL", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Simon Zwarts and Mark Johnson. 2011. The impact of language models and loss functions on repair dis- fluency detection. In Proc. of ACL.", |
| "links": null |
| } |
| }, |
| "ref_entries": { |
| "TABREF2": { |
| "html": null, |
| "num": null, |
| "content": "<table/>", |
| "type_str": "table", |
| "text": "Baseline results showing the degradation by joining utterances into long sentences, removing precision/recall balancing, and reducing the clique order of features. All models are trained using M3N." |
| }, |
| "TABREF5": { |
| "html": null, |
| "num": null, |
| "content": "<table/>", |
| "type_str": "table", |
| "text": "Performance comparison between the rescoring method and the soft-cascade method with respect to the baseline isolated prediction. The rescoring is done on 2n 2 hypotheses. All models are trained using M3N without balancing precision and recall. Figures in the bracket are the oracle F1 scores of the 2n 2 hypotheses. *:on the development set, the best overall result is obtained at n = 10." |
| }, |
| "TABREF6": { |
| "html": null, |
| "num": null, |
| "content": "<table><tr><td/><td>edit</td><td colspan=\"2\">filler</td><td/><td/><td/><td/><td/><td/></tr><tr><td>Token:</td><td>it</td><td>was</td><td>n't</td><td colspan=\"2\">you know</td><td>it</td><td colspan=\"3\">was never announced</td></tr><tr><td>PU:</td><td colspan=\"8\">None None Comma None Comma None None None</td><td>Period</td></tr><tr><td>DF:</td><td>E</td><td>E</td><td>E</td><td>F</td><td>F</td><td>O</td><td>O</td><td>O</td><td>O</td></tr><tr><td>(a)</td><td/><td>E</td><td>E</td><td>F</td><td>F</td><td>O</td><td>O</td><td>O</td><td>Period</td></tr><tr><td>Mixed-</td><td/><td/><td/><td/><td/><td/><td/><td/><td/></tr><tr><td>label</td><td/><td/><td/><td/><td/><td/><td/><td/><td/></tr><tr><td>LCRF</td><td/><td/><td/><td/><td/><td/><td/><td/><td/></tr><tr><td>(b) Cross-</td><td>None E</td><td>None E</td><td>Comma E</td><td>None F</td><td>Comma F</td><td>None O</td><td>None O</td><td>None O</td><td>Period O</td></tr><tr><td>product</td><td/><td/><td/><td/><td/><td/><td/><td/><td/></tr><tr><td>LCRF</td><td/><td/><td/><td/><td/><td/><td/><td/><td/></tr><tr><td>(c)</td><td>None</td><td colspan=\"5\">None Comma None Comma None</td><td>None</td><td>None</td><td>Period</td></tr><tr><td>2-layer</td><td/><td/><td/><td/><td/><td/><td/><td/><td/></tr><tr><td>FCRF</td><td>E</td><td>E</td><td>E</td><td>F</td><td>F</td><td>O</td><td>O</td><td>O</td><td>O</td></tr></table>", |
| "type_str": "table", |
| "text": "Ref:it was n't , you know , it was never announced ." |
| } |
| } |
| } |
| } |