File size: 65,616 Bytes
6fa4bc9
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
685
686
687
688
689
690
691
692
693
694
695
696
697
698
699
700
701
702
703
704
705
706
707
708
709
710
711
712
713
714
715
716
717
718
719
720
721
722
723
724
725
726
727
728
729
730
731
732
733
734
735
736
737
738
739
740
741
742
743
744
745
746
747
748
749
750
751
752
753
754
755
756
757
758
759
760
761
762
763
764
765
766
767
768
769
770
771
772
773
774
775
776
777
778
779
780
781
782
783
784
785
786
787
788
789
790
791
792
793
794
795
796
797
798
799
800
801
802
803
804
805
806
807
808
809
810
811
812
813
814
815
816
817
818
819
820
821
822
823
824
825
826
827
828
829
830
831
832
833
834
835
836
837
838
839
840
841
842
843
844
845
846
847
848
849
850
851
852
853
854
855
856
857
858
859
860
861
862
863
864
865
866
867
868
869
870
871
872
873
874
875
876
877
878
879
880
881
882
883
884
885
886
887
888
889
890
891
892
893
894
895
896
897
898
899
900
901
902
903
904
905
906
907
908
909
910
911
912
913
914
915
916
917
918
919
920
921
922
923
924
925
926
927
928
929
930
931
932
933
934
935
936
937
938
939
940
941
942
943
944
945
946
947
948
949
950
951
952
953
954
955
956
957
958
959
960
961
962
963
964
965
966
967
968
969
970
971
972
973
974
975
976
977
978
979
980
981
982
983
984
985
986
987
988
989
990
991
992
993
994
995
996
997
998
999
1000
1001
1002
1003
1004
1005
1006
1007
1008
1009
1010
1011
1012
1013
1014
1015
1016
1017
1018
1019
1020
1021
1022
1023
1024
1025
1026
1027
1028
1029
1030
1031
1032
1033
1034
1035
1036
1037
1038
1039
1040
1041
1042
1043
1044
1045
1046
1047
1048
1049
1050
1051
1052
1053
1054
1055
1056
1057
1058
1059
1060
1061
1062
1063
1064
1065
1066
1067
1068
1069
1070
1071
1072
1073
1074
1075
1076
1077
1078
1079
1080
1081
1082
1083
1084
1085
1086
1087
1088
1089
1090
1091
1092
1093
1094
1095
1096
1097
1098
1099
1100
1101
1102
1103
1104
1105
1106
1107
1108
1109
1110
1111
1112
1113
1114
1115
1116
1117
1118
1119
1120
1121
1122
1123
1124
1125
1126
1127
1128
1129
1130
1131
1132
1133
1134
1135
1136
1137
1138
1139
1140
1141
1142
{
    "paper_id": "P01-1012",
    "header": {
        "generated_with": "S2ORC 1.0.0",
        "date_generated": "2023-01-19T09:30:08.191674Z"
    },
    "title": "Detecting problematic turns in human-machine interactions: Rule-induction versus memory-based learning approaches Antal van den Bosch \u00a1",
    "authors": [
        {
            "first": "Ilk",
            "middle": [
                "/"
            ],
            "last": "Comp",
            "suffix": "",
            "affiliation": {},
            "email": ""
        },
        {
            "first": "",
            "middle": [],
            "last": "Ling",
            "suffix": "",
            "affiliation": {},
            "email": ""
        },
        {
            "first": "Emiel",
            "middle": [],
            "last": "Krahmer",
            "suffix": "",
            "affiliation": {},
            "email": ""
        },
        {
            "first": "Marc",
            "middle": [],
            "last": "Swerts",
            "suffix": "",
            "affiliation": {},
            "email": "m.g.j.swerts@tue.nl"
        }
    ],
    "year": "",
    "venue": null,
    "identifiers": {},
    "abstract": "We address the issue of on-line detection of communication problems in spoken dialogue systems. The usefulness is investigated of the sequence of system question types and the word graphs corresponding to the respective user utterances. By applying both ruleinduction and memory-based learning techniques to data obtained with a Dutch train timetable information system, the current paper demonstrates that the aforementioned features indeed lead to a method for problem detection that performs significantly above baseline. The results are interesting from a dialogue perspective since they employ features that are present in the majority of spoken dialogue systems and can be obtained with little or no computational overhead. The results are interesting from a machine learning perspective, since they show that the rule-based method performs significantly better than the memory-based method, because the former is better capable of representing interactions between features.",
    "pdf_parse": {
        "paper_id": "P01-1012",
        "_pdf_hash": "",
        "abstract": [
            {
                "text": "We address the issue of on-line detection of communication problems in spoken dialogue systems. The usefulness is investigated of the sequence of system question types and the word graphs corresponding to the respective user utterances. By applying both ruleinduction and memory-based learning techniques to data obtained with a Dutch train timetable information system, the current paper demonstrates that the aforementioned features indeed lead to a method for problem detection that performs significantly above baseline. The results are interesting from a dialogue perspective since they employ features that are present in the majority of spoken dialogue systems and can be obtained with little or no computational overhead. The results are interesting from a machine learning perspective, since they show that the rule-based method performs significantly better than the memory-based method, because the former is better capable of representing interactions between features.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Abstract",
                "sec_num": null
            }
        ],
        "body_text": [
            {
                "text": "Given the state of the art of current language and speech technology, communication problems are unavoidable in present-day spoken dialogue systems. The main source of these problems lies in the imperfections of automatic speech recognition, but also incorrect interpretations by the natural language understanding module or wrong default assumptions by the dialogue manager are likely to lead to confusion. If a spoken dialogue system had the ability to detect communication problems on-line and with high accuracy, it might be able to correct certain errors or it could interact with the user to solve them. For instance, in the case of communication problems, it would be beneficial to change from a relatively natural dialogue strategy to a more constrained one in order to resolve the problems (see e.g., Litman and Pan 2000) . Similarly, it has been shown that users switch to a 'marked', hyperarticulate speaking style after problems (e.g., Soltau and Waibel 1998) , which itself is an important source of recognition errors. This might be solved by using two recognizers in parallel, one trained on normal speech and one on hyperarticulate speech. If there are communication problems, then the system could decide to focus on the recognition results delivered by the engine trained on hyperarticulate speech.",
                "cite_spans": [
                    {
                        "start": 810,
                        "end": 830,
                        "text": "Litman and Pan 2000)",
                        "ref_id": "BIBREF7"
                    },
                    {
                        "start": 948,
                        "end": 971,
                        "text": "Soltau and Waibel 1998)",
                        "ref_id": "BIBREF11"
                    }
                ],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Introduction",
                "sec_num": "1"
            },
            {
                "text": "For such approaches to work, however, it is essential that the spoken dialogue system is able to automatically detect communication problems with a high accuracy. In this paper, we investigate the usefulness for problem detection of the word graph and the history of system question types. These features are present in many spoken dialogue systems and do not require additional computation, which makes this a very cheap method to detect problems. We shall see that on the basis of the previous and the current word graph and the six most recent system question types, communication problems can be detected with an accuracy of 91%, which is a significant improvement over the relevant baseline. This shows that spoken dialogue systems may use these features to better predict whether the ongoing dialogue is problematic. In addition, the current work is interesting from a machine learning perspective. We apply two machine learning techniques: the memory-based IB1-IG algorithm (Aha et al. 1991 , Daelemans et al. 1997 and the RIPPER rule induction algorithm (Cohen 1996) . As we shall see, some interesting differences between the two approaches arise.",
                "cite_spans": [
                    {
                        "start": 981,
                        "end": 997,
                        "text": "(Aha et al. 1991",
                        "ref_id": "BIBREF0"
                    },
                    {
                        "start": 998,
                        "end": 1021,
                        "text": ", Daelemans et al. 1997",
                        "ref_id": "BIBREF2"
                    },
                    {
                        "start": 1062,
                        "end": 1074,
                        "text": "(Cohen 1996)",
                        "ref_id": "BIBREF1"
                    }
                ],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Introduction",
                "sec_num": "1"
            },
            {
                "text": "Recently there has been an increased interest in developing automatic methods to detect problematic dialogue situations using machine learning techniques. For instance, Litman et al. (1999) and Walker et al. (2000a) use RIPPER (Cohen 1996) to classify problematic and unproblematic dialogues. Following up on this, Walker et al. (2000b) aim at detecting problems at the utterance level, based on data obtained with AT&Ts How May I Help You (HMIHY) system (Gorin et al. 1997 ). Walker and co-workers apply RIPPER to 43 features which are automatically generated by three modules of the HMIHY system, namely the speech recognizer (ASR), the natural language understanding module (NLU) and the dialogue manager (DM). The best result is obtained using all features: communication problems are detected with an accuracy of 86%, a precision of 83% and a recall of 75%. It should be noted that the NLU features play first fiddle among the set of all features. In fact, using only the NLU features performs comparable to using all features. Walker et al. (2000b) also briefly compare the performance of RIPPER with some other machine learning approaches, and show that it performs comparable to a memory-based (instance-based) learning algorithm (IB, see Aha et al. 1991) .",
                "cite_spans": [
                    {
                        "start": 169,
                        "end": 189,
                        "text": "Litman et al. (1999)",
                        "ref_id": "BIBREF8"
                    },
                    {
                        "start": 194,
                        "end": 215,
                        "text": "Walker et al. (2000a)",
                        "ref_id": "BIBREF13"
                    },
                    {
                        "start": 227,
                        "end": 239,
                        "text": "(Cohen 1996)",
                        "ref_id": "BIBREF1"
                    },
                    {
                        "start": 315,
                        "end": 336,
                        "text": "Walker et al. (2000b)",
                        "ref_id": "BIBREF14"
                    },
                    {
                        "start": 455,
                        "end": 473,
                        "text": "(Gorin et al. 1997",
                        "ref_id": "BIBREF4"
                    },
                    {
                        "start": 1033,
                        "end": 1054,
                        "text": "Walker et al. (2000b)",
                        "ref_id": "BIBREF14"
                    },
                    {
                        "start": 1247,
                        "end": 1263,
                        "text": "Aha et al. 1991)",
                        "ref_id": "BIBREF0"
                    }
                ],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Related work",
                "sec_num": "2"
            },
            {
                "text": "The results which Walker and co-workers describe show that it is possible to automatically detect communication problems in the HMIHY system, using machine learning techniques. Their approach also raises a number of interesting followup questions, some concerned with problem detection, others with the use of machine learning techniques. (1) Walker et al. train their classifier on a large set of features, and show that the set of features produced by the NLU module are the most important ones. However, this leaves an important general question unanswered, namely which particular features contribute to what extent? (2) Moreover, the set of features which the NLU module produces appear to be rather specific to the HMIHY system and indicate things like the percentage of the input covered by the relevant grammar fragment, the presence or absence of context shifts, and the semantic diversity of subsequent utterances. Many current day spoken dialogue systems do not have such a sophisticated NLU module, and consequently it is unlikely that they have access to these kinds of features. In sum, it is uncertain whether other spoken dialogue systems can benefit from the findings described by Walker et al. (2000b) , since it is unclear which features are important and to what extent these features are available in other spoken dialogue systems. Finally, (3) we agree with Walker et al. (and the machine learning community at large) that it is important to compare different machine learning techniques to find out which techniques perform well for which kinds of tasks. Walker et al. found that RIPPER does not perform significantly better or worse than a memory-based learning technique. Is this incidental or does it reflect a general property of the problem detection task?",
                "cite_spans": [
                    {
                        "start": 1198,
                        "end": 1219,
                        "text": "Walker et al. (2000b)",
                        "ref_id": "BIBREF14"
                    }
                ],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Related work",
                "sec_num": "2"
            },
            {
                "text": "The current paper uses a similar methodology for on-line problem detection as Walker et al. (2000b) , but (1) we take a bottom-up approach, focussing on a small number of features and investigating their usefulness on a per-feature basis and (2) the features which we study are automatically available in the majority of current spoken dialogue system: the sequence of system question types and the word graphs corresponding to the respective user utterances. A word graph is a lattice of word hypotheses, and we conjecture that various features which have been shown to cue communication problems (prosodic, linguistic and ASR features, see e.g., Hirschberg et al. 1999 , Krahmer et al. 1999 and Swerts et al. 2000 have correlates in the word graph. The sequence of system question types is taken to model the dialogue history. Finally, (3) to gain further insight into the adequacy of various machine learn-ing techniques for problem detection we use both RIPPER and the memory-based IB1-IG algorithm.",
                "cite_spans": [
                    {
                        "start": 78,
                        "end": 99,
                        "text": "Walker et al. (2000b)",
                        "ref_id": "BIBREF14"
                    },
                    {
                        "start": 648,
                        "end": 670,
                        "text": "Hirschberg et al. 1999",
                        "ref_id": "BIBREF5"
                    },
                    {
                        "start": 671,
                        "end": 692,
                        "text": ", Krahmer et al. 1999",
                        "ref_id": "BIBREF6"
                    },
                    {
                        "start": 693,
                        "end": 715,
                        "text": "and Swerts et al. 2000",
                        "ref_id": "BIBREF12"
                    }
                ],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Related work",
                "sec_num": "2"
            },
            {
                "text": "The corpus we used consisted of 3739 question-answer pairs, taken from 444 complete dialogues. The dialogues consist of users interacting with a Dutch spoken dialogue system which provides information about train time tables. The system prompts the user for unknown slots, such as departure station, arrival station, date, etc., in a series of questions. The system uses a combination of implicit and explicit verification strategies.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Data and Labeling",
                "sec_num": "3.1"
            },
            {
                "text": "The data were annotated with a highly limited set of labels. In particular, the kind of system question and whether the reply of the user gave rise to communication problems or not. The latter feature is the one to be predicted. The difference between an explicit verification and a yes/no question is that the former but not the latter is aimed at checking whether what the system understood or assumed corresponds with what the user wants. If the current system question is a repetition of the previous question it asked, this is indicated by the suffix R. A question only counts as a repetition when it has the same contents as the previous system question. Of the user inputs, we only labeled whether they gave rise to a communication problem or not. A communication problem arises when the value which the system assigns to a particular slot (departure station, date, etc.) does not coincide with the value given for that particular slot by the user in his or her most recent contribution to the dialogue or when the system makes an incorrect default assumption (e.g., the dialogue manager assumes that the date slot should be filled with the current date, i.e., that the user wants to travel today). Communication problems are generally easy to label since the spoken dialogue system under consideration here always provides direct feedback (via verification questions) about what it believes the user intends. Consider the following exchange.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Data and Labeling",
                "sec_num": "3.1"
            },
            {
                "text": "U: I want to go to Amsterdam. S: So you want to go to Rotterdam?",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Data and Labeling",
                "sec_num": "3.1"
            },
            {
                "text": "As soon as the user hears the explicit verification question of the system, it will be clear that his or her last turn was misunderstood. The problemfeature was labeled by two of the authors to avoid labeling errors. Differences between the two annotators were infrequent and could always easily be resolved.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Data and Labeling",
                "sec_num": "3.1"
            },
            {
                "text": "Of the 3739 user utterances 1564 gave rise to communication problems (an error rate of 41.8%). The majority class is thus formed by the unproblematic user utterances, which form 58.2% of all user utterances. This suggests that the baseline for predicting communication problems is obtained by always predicting that there are no communication problems. This strategy has an accuracy of 58.2%, and a recall of 0% (all problems are missed).T he precision is not defined,\u00a9 and consequently neither is the . This baseline is misleading, however, when we are interested in predicting whether the previous user utterance gave rise to communication problems. There are cases when the dialogue system is itself clearly aware of communication problems. This is in particular the case when the system repeats the question (labeled with the suffix R) or when it asks a metaquestion (M). In the corpus under investigation here this happens 1024 times. It would not be For definitions of accuracy, precision and recall see e.g., Manning and Sch\u00fctze (1999:268-269 ).",
                "cite_spans": [
                    {
                        "start": 1016,
                        "end": 1049,
                        "text": "Manning and Sch\u00fctze (1999:268-269",
                        "ref_id": null
                    }
                ],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Baselines",
                "sec_num": "3.2"
            },
            {
                "text": "Since 0 cases are selected, one would have to divide by 0 to determine precision for this baseline.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Baselines",
                "sec_num": "3.2"
            },
            {
                "text": "Throughout this paper we use the Table 1 : Baselines very illuminating to develop an automatic error detector which detects only those problems that the system was already aware of. Therefore we take the following as our base-line strategy for predicting whether the previous user utterance gave rise to problems, henceforth referred to as the system-knows-baseline:",
                "cite_spans": [],
                "ref_spans": [
                    {
                        "start": 33,
                        "end": 40,
                        "text": "Table 1",
                        "ref_id": null
                    }
                ],
                "eq_spans": [],
                "section": "Baselines",
                "sec_num": "3.2"
            },
            {
                "text": "if the Q(@ ) is repetition or meta-question, then predict user utterance @ -1 caused problems, else predict user utterance @ -1 caused no problems.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Baselines",
                "sec_num": "3.2"
            },
            {
                "text": "This 'strategy' predicts problems with an accuracy of 85.6% (1024 of the 1564 problems are detected, thus 540 of 3739 decisions are wrong), a precision of 100% (of 1024 predicted problems 1024 were indeed problematic), a recall of 65.5% (1024 of the 1564 problems are predicted to be problematic) and thus an A B",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Baselines",
                "sec_num": "3.2"
            },
            {
                "text": "of 79.1. This is a sharp baseline, but for predicting whether the previous user utterance caused problems or not the system-knows-baseline is much more informative and relevant than the majority-classbaseline. Table 1 summarizes the baselines.",
                "cite_spans": [],
                "ref_spans": [
                    {
                        "start": 210,
                        "end": 217,
                        "text": "Table 1",
                        "ref_id": null
                    }
                ],
                "eq_spans": [],
                "section": "Baselines",
                "sec_num": "3.2"
            },
            {
                "text": "Question-answer pairs were represented as feature vectors (or patterns) of the following form. Six features were reserved for the history of system questions asked so far in the current dialogue (6Q). Of course, if the system only asked 3 questions so far, only 3 types of system questions are stored in memory and the remaining three features for system question are not assigned a value. The representation of the user's answer is derived from the word graph produced by the ASR module. It should be kept in mind that in general the word graph is much more complex than the recognized string. The latter typically is the most plausible path (e.g., on the basis of acoustic confidence scores) in the word graph, which itself may contain many other paths. Different systems determine the plausibility of paths in the word graph in different ways. Here, for the sake of generality, we abstract over such differences and simply represent a word graph as a Bag of Words (BoW), collecting all words that occur in one of the paths, irrespective of the associated acoustic confidence score. A lexicon was derived of all the words and phrases that occurred in the corpus. Each word graph is represented as a sequence of bits, where the C -th bit is set to 1 if the C -th word in the pre-derived lexicon occurred at least once in the word graph corresponding to the current user utterance and 0 otherwise. Finally, for each user utterance, a feature is reserved for indicating whether it gave rise to communication problems or not. This latter feature is the one to be predicted.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Feature representations",
                "sec_num": "3.3"
            },
            {
                "text": "There are basically two approaches for detecting communication problems. One is to try to decide on the basis of the current user utterance whether it will be recognized and interpreted correctly or not. The other approach uses the current user utterance to determine whether the processing of the previous user utterance gave rise to communication problems. This approach is based on the assumption that users give feedback on communication problems when they notice that the system misunderstood their previous input. In this study, eight prediction tasks have been defined: the first three are concerned with predicting whether the current user input will cause problems, and naturally, for these three tasks, the majority-class-baseline is the relevant one; the last five tasks are concerned with predicting whether the previous user utterance caused problems, and for these the sharp, system-knows-baseline is the appropriate one. The eight tasks are: (1) predict on the basis of the (representation of the) current word graph BoW ",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Feature representations",
                "sec_num": "3.3"
            },
            {
                "text": "For the experiments we used the rule-induction algorithm RIPPER (Cohen 1996) and the memory-based IB1-IG algorithm (Aha et al. 1991 , Daelemans et al. 1997). RIPPER is a fast rule induction algorithm. It starts with splitting the training set in two. On the basis of one half, it induces rules in a straightforward way (roughly, by trying to maximize coverage for each rule), with potential overfitting. When the induced rules classify instances in the other half below a certain threshold, they are not stored. Rules are induced per class. By default the ordering is from low-frequency classes to high frequency ones, leaving the most frequent class as the default rule, which is generally beneficial for the size of the rule set.",
                "cite_spans": [
                    {
                        "start": 64,
                        "end": 76,
                        "text": "(Cohen 1996)",
                        "ref_id": "BIBREF1"
                    },
                    {
                        "start": 115,
                        "end": 131,
                        "text": "(Aha et al. 1991",
                        "ref_id": "BIBREF0"
                    },
                    {
                        "start": 132,
                        "end": 155,
                        "text": ", Daelemans et al. 1997",
                        "ref_id": "BIBREF2"
                    }
                ],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Learning techniques",
                "sec_num": "3.4"
            },
            {
                "text": "The memory-based IB1-IG algorithm is one of the primary memory-based learning algorithms. Memory-based learning techniques can be characterized by the fact that they store a representation of a set of training data in memory, and classify new instances by looking for the most similar instances in memory. The most basic distance function between two features is the overlap metric: equation (1) gives a point-wise distance between features which is 1 if",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Learning techniques",
                "sec_num": "3.4"
            },
            {
                "text": "the feature values differ",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Learning techniques",
                "sec_num": "3.4"
            },
            {
                "text": "and 0 otherwise.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Learning techniques",
                "sec_num": "3.4"
            },
            {
                "text": "EQUATION",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [
                    {
                        "start": 0,
                        "end": 8,
                        "text": "EQUATION",
                        "ref_id": "EQREF",
                        "raw_str": "E G F I H q P S R r T p s t V u F c b v \u00a4 P S f w T",
                        "eq_num": "(1)"
                    }
                ],
                "section": "Learning techniques",
                "sec_num": "3.4"
            },
            {
                "text": "Both learning techniques were used for the same 8 prediction tasks, and received exactly the same feature vectors as input. All experiments were performed using ten-fold cross-validation, which yields errors margins in the predictions.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Learning techniques",
                "sec_num": "3.4"
            },
            {
                "text": "First we look at the results obtained with the IB1-IG algorithm (see Table 2 ). Consider the problem of predicting whether the current user utterance will cause problems. Either looking at the current word graph (BoW @ ), at the six most recent system questions (6Q @ ) or at both, leads to a significant improvement with respect to the majority-class-baseline. The best results are obtained with only the system question types (although the difference with the results for the other two tasks is not significant): a 63.7% accuracy and an F-score of 58.3. However, even though this is a significant improvement over the majority-class-baseline, the accuracy is improved with only 5.5%.",
                "cite_spans": [],
                "ref_spans": [
                    {
                        "start": 69,
                        "end": 76,
                        "text": "Table 2",
                        "ref_id": null
                    }
                ],
                "eq_spans": [],
                "section": "Results",
                "sec_num": "4"
            },
            {
                "text": "Next consider the problem of predicting whether the previous user utterance caused communication problems (these are the five remaining tasks). The best result is obtained by taking the two most recent word graphs and the six most recent system question types as input. This yields an accuracy of 88.1%, which is a significant improvement with respect to the All checks for significance were performed with a one-tailed test.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Results",
                "sec_num": "4"
            },
            {
                "text": "As an aside, we performed one experiment with the words in the actual, transcribed user utterance at time t instead of BoW , where the task is to predict whether the current user utterance would cause a communication problem. This resulted in an accuracy of 64.2% (with a standard deviation of 1.1%). This is not significantly better than the result obtained with the BoW. , with standard deviations) on the eight prediction tasks.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Results",
                "sec_num": "4"
            },
            {
                "text": ": this accuracy significantly improves the majority-class-baseline ( B ).",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Results",
                "sec_num": "4"
            },
            {
                "text": ": this accuracy significantly improves the system-knows-baseline ( B ). : this accuracy result is significantly better than the IB1-IG result given in Table 2 for this particular task, with p < .05.",
                "cite_spans": [],
                "ref_spans": [
                    {
                        "start": 151,
                        "end": 158,
                        "text": "Table 2",
                        "ref_id": null
                    }
                ],
                "eq_spans": [],
                "section": "Results",
                "sec_num": "4"
            },
            {
                "text": ": this accuracy result is significantly better than the IB1-IG result given in Table 2 for this particular task, with p < .001.",
                "cite_spans": [],
                "ref_spans": [
                    {
                        "start": 79,
                        "end": 86,
                        "text": "Table 2",
                        "ref_id": null
                    }
                ],
                "eq_spans": [],
                "section": "\u00a2",
                "sec_num": null
            },
            {
                "text": ": this accuracy result is significantly better than the IB1-IG result given in Table 2 for this particular task, with p < .01.",
                "cite_spans": [],
                "ref_spans": [
                    {
                        "start": 79,
                        "end": 86,
                        "text": "Table 2",
                        "ref_id": null
                    }
                ],
                "eq_spans": [],
                "section": "\u00a6",
                "sec_num": null
            },
            {
                "text": "sharp, system-knows-baseline. In addition, the F-score",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "\u00a6",
                "sec_num": null
            },
            {
                "text": "of 84.8 is nearly 6 points higher than that of the relevant, majority-class baseline.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "\u00a6",
                "sec_num": null
            },
            {
                "text": "The results obtained with RIPPER are shown in Table 3 . On the problem of predicting whether the current user utterance will cause a problem, RIPPER obtains the best results by taking as input both the current word graph and the types of the six most recent system questions, predicting problems with an accuracy of 66.0%. This is a significant improvement over the majority-class-baseline, but the result is not significantly better than that obtained with either the word graph or the system questions in isolation. Interestingly, the result is significantly better than the results for IB1-IG on the same task.",
                "cite_spans": [],
                "ref_spans": [
                    {
                        "start": 46,
                        "end": 53,
                        "text": "Table 3",
                        "ref_id": "TABREF2"
                    }
                ],
                "eq_spans": [],
                "section": "\u00a6",
                "sec_num": null
            },
            {
                "text": "On the problem of predicting whether the previous user utterance caused a problem, RIPPER obtains the best results by taking all features into account (that is: the two most recent bags of words and the six system questions). This results in a 91.1% accuracy, which is a significant improvement over the sharp system-knows-baseline. This implies that 38% of the communication problems which were not detected by the dialogue system d Notice that RIPPER sometimes performs below the system-knows-baseline, even though the relevant feature (in particular the type of the last system question) is present. Inspection of the RIPPER rules obtained by training only on 6Q reveals that RIPPER learns a slightly suboptimal rule set, thereby misclassifying 10 instances on average. -1, and the six most recent system questions. Based on the entire data set. The question features are defined in section 2. The word \"naar\" is Dutch for to, \"om\" for at, \"uur\" for hour, \"van\" for from, \"vanuit\" is a slightly archaic variant of \"van\" (from), \"ik\" is Dutch for I, \"nee\" for no, \"niet\" for not and \"wil\", finally, for want. The (U /i ) numbers at the end of each line indicate how many correct (U ) and incorrect (i ) decisions were taken using this particular if ...then ...statement.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "\u00a6",
                "sec_num": null
            },
            {
                "text": "under investigation could be classified correctly using features which were already present in the system (word graphs and system question types). Moreover, the F-score is 89, which is 10 points higher than the F-score associated with the system-knows baseline strategy. Notice also that this RIPPER result is significantly better than the IB1-IG results for the same task.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "\u00a6",
                "sec_num": null
            },
            {
                "text": "To gain insight into the rules learned by RIPPER for the last task, we applied RIPPER to the complete data set. The rules induced are displayed in Figure 1 . RIPPER's first rule is concerned with repeated questions (compare with the system-knows-baseline). One important property of many other rules is that they explicitly combine pieces of information from the three main sources of information (the system questions, the current word graph and the previous word graph). Moreover, it is interesting to note that the words which crop up in the RIPPER rules are primarily function words. Another noteworthy feature of the RIPPER rules is that they reflect certain properties which have been claimed to cue communication problems. For instance, Krahmer et al. (1999) , in their descriptive analysis of dialogue problems, found that repeated material is often an indication of problems, as is the use of a marked vocabulary. The rules 2, 3 and 7 are examples of the former cue, while the occurrence of the somewhat archaic \"vanuit\" instead of the ordinary \"van\" is an example of the latter.",
                "cite_spans": [
                    {
                        "start": 744,
                        "end": 765,
                        "text": "Krahmer et al. (1999)",
                        "ref_id": "BIBREF6"
                    }
                ],
                "ref_spans": [
                    {
                        "start": 147,
                        "end": 155,
                        "text": "Figure 1",
                        "ref_id": "FIGREF3"
                    }
                ],
                "eq_spans": [],
                "section": "\u00a6",
                "sec_num": null
            },
            {
                "text": "In this study we have looked at automatic methods for problem detection using simple features which are available in the vast majority of spoken dialogue systems, and require little or no computational overhead. We have investigated two approaches to problem detection. The first approach is aimed at testing whether a user utterance, captured in a noisy word graph, and/or the recent history of system utterances, would be predictive of whether the utterance itself would be misrecognised. The results, which basically represent a signal quality test, show that problematic cases could be discerned with an accuracy of about 65%. Although this is somewhat above the baseline of 58% decision accuracy when no problems would be predicted, signalling recognition problems with word graph features and previous system question types as predictors is a hard task. As other studies suggest (e.g., Hirschberg et al. 1999) , confidence scores and acoustic/prosodic features could be of help.",
                "cite_spans": [
                    {
                        "start": 892,
                        "end": 915,
                        "text": "Hirschberg et al. 1999)",
                        "ref_id": "BIBREF5"
                    }
                ],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Discussion",
                "sec_num": "5"
            },
            {
                "text": "The second approach tested whether the word graph for the current user utterance and/or the recent history of system question types could be employed to predict whether the previous user k In the sense that it is not a perfect image of the user's input.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Discussion",
                "sec_num": "5"
            },
            {
                "text": "utterance caused communication problems. The underlying assumption is that users will signal problems as soon as they become aware of them through the feedback provided by the system. Thus, in a sense, this second approach represents a noisy channel filtering task: the current utterance has to be decoded as signalling a problem or not. As the results show, this task can be performed at a surprisingly high level: about 91% decision accuracy (which is an error reduction of 38%), with an F-score",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Discussion",
                "sec_num": "5"
            },
            {
                "text": "of the problem category of 89. This result can only be obtained using a combination of features; neither the word graph features in isolation nor the system question types in isolation offer enough predictive power to reach above the sharp baseline of 86% accuracy and an F-score on the problem category of 79.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Discussion",
                "sec_num": "5"
            },
            {
                "text": "Keeping information sources isolated or combining them directly influences the relative performances of the memory-based IB1-IG algorithm versus the RIPPER rule induction algorithm. When features are of the same type, accuracies of the memory-based and the ruleinduction systems do not differ significantly (with one exception). In contrast, when features from different sources (e.g., words in the word graph and question type features) are combined, RIPPER profits more than IB1-IG does, causing RIPPER to perform significantly more accurately. The feature independence assumption of memory-based learning appears to be the harming cause: by its definition, IB1-IG does not give extra weight to apparently relevant interactions of feature values from different sources. In contrast, in nine out of the twelve rules that RIPPER produces, word graph features and system questions type features are explicitly integrated as joint left-hand side conditions.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Discussion",
                "sec_num": "5"
            },
            {
                "text": "The current results show that for on-line detection of communication problems at the utterance level it is already beneficial to pay attention only to the lexical information in the word graph and the sequence of system question types, features which are present in most spoken dialogue systems and which can be obtained with little or no computational overhead. An approach to automatic problem detection is potentially very useful for spoken dialogue systems, since it gives a quantitative criterion for, for instance, changing the dialogue strategy (initiative, verification) or speech recognition engine (from one trained on normal speech to one trained on hyperarticulate speech).",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Discussion",
                "sec_num": "5"
            }
        ],
        "back_matter": [],
        "bib_entries": {
            "BIBREF0": {
                "ref_id": "b0",
                "title": "Instance-based Learning Algorithms",
                "authors": [
                    {
                        "first": "D",
                        "middle": [],
                        "last": "Aha",
                        "suffix": ""
                    },
                    {
                        "first": "D",
                        "middle": [],
                        "last": "Kibler",
                        "suffix": ""
                    },
                    {
                        "first": "M",
                        "middle": [],
                        "last": "Albert",
                        "suffix": ""
                    }
                ],
                "year": 1991,
                "venue": "Machine Learning",
                "volume": "6",
                "issue": "",
                "pages": "36--66",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "Aha, D., Kibler, D., Albert, M. (1991), Instance-based Learning Algorithms, Machine Learning, 6:36-66.",
                "links": null
            },
            "BIBREF1": {
                "ref_id": "b1",
                "title": "Learning trees and rules with set-valued features",
                "authors": [
                    {
                        "first": "W",
                        "middle": [],
                        "last": "Cohen",
                        "suffix": ""
                    }
                ],
                "year": 1996,
                "venue": "Proc. 13th AAAI",
                "volume": "",
                "issue": "",
                "pages": "",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "Cohen, W. (1996), Learning trees and rules with set-valued features, Proc. 13th AAAI.",
                "links": null
            },
            "BIBREF2": {
                "ref_id": "b2",
                "title": "IGTree: using trees for compression and classification in lazy learning algorithms",
                "authors": [
                    {
                        "first": "W",
                        "middle": [],
                        "last": "Daelemans",
                        "suffix": ""
                    },
                    {
                        "first": "A",
                        "middle": [],
                        "last": "Van Den Bosch",
                        "suffix": ""
                    },
                    {
                        "first": "A",
                        "middle": [],
                        "last": "Weijters",
                        "suffix": ""
                    }
                ],
                "year": 1997,
                "venue": "Artificial Intelligence Review",
                "volume": "11",
                "issue": "",
                "pages": "407--423",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "Daelemans, W., van den Bosch, A., Weijters, A. (1997), IGTree: using trees for compression and classification in lazy learning algorithms, Artificial Intelligence Review 11:407-423.",
                "links": null
            },
            "BIBREF3": {
                "ref_id": "b3",
                "title": "TiMBL: Tilburg Memory-Based Learner, version 3.0, reference guide",
                "authors": [
                    {
                        "first": "W",
                        "middle": [],
                        "last": "Daelemans",
                        "suffix": ""
                    },
                    {
                        "first": "J",
                        "middle": [],
                        "last": "Zavrel",
                        "suffix": ""
                    },
                    {
                        "first": "K",
                        "middle": [],
                        "last": "Van Der Sloot",
                        "suffix": ""
                    },
                    {
                        "first": "A",
                        "middle": [],
                        "last": "Van Den Bosch",
                        "suffix": ""
                    }
                ],
                "year": 2000,
                "venue": "",
                "volume": "",
                "issue": "",
                "pages": "",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "Daelemans, W., Zavrel, J., van der Sloot, K., van den Bosch, A. (2000), TiMBL: Tilburg Memory-Based Learner, version 3.0, reference guide, ILK Technical Report 00-01, http://ilk.kub.nl/~ilk/papers/ilk0001.ps.gz.",
                "links": null
            },
            "BIBREF4": {
                "ref_id": "b4",
                "title": "How may I Help You?",
                "authors": [
                    {
                        "first": "A",
                        "middle": [],
                        "last": "Gorin",
                        "suffix": ""
                    },
                    {
                        "first": "G",
                        "middle": [],
                        "last": "Riccardi",
                        "suffix": ""
                    },
                    {
                        "first": "J",
                        "middle": [],
                        "last": "Wright",
                        "suffix": ""
                    }
                ],
                "year": 1997,
                "venue": "Speech Communication",
                "volume": "23",
                "issue": "",
                "pages": "113--127",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "Gorin, A., Riccardi, G., Wright, J. (1997), How may I Help You?, Speech Communication 23:113-127.",
                "links": null
            },
            "BIBREF5": {
                "ref_id": "b5",
                "title": "Prosodic cues to recognition errors",
                "authors": [
                    {
                        "first": "J",
                        "middle": [],
                        "last": "Hirschberg",
                        "suffix": ""
                    },
                    {
                        "first": "D",
                        "middle": [],
                        "last": "Litman",
                        "suffix": ""
                    },
                    {
                        "first": "M",
                        "middle": [],
                        "last": "Swerts",
                        "suffix": ""
                    }
                ],
                "year": 1999,
                "venue": "Proc. ASRU",
                "volume": "",
                "issue": "",
                "pages": "",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "Hirschberg, J., Litman, D., Swerts, M. (1999), Prosodic cues to recognition errors, Proc. ASRU, Keystone, CO.",
                "links": null
            },
            "BIBREF6": {
                "ref_id": "b6",
                "title": "Error spotting in human-machine interactions",
                "authors": [
                    {
                        "first": "E",
                        "middle": [],
                        "last": "Krahmer",
                        "suffix": ""
                    },
                    {
                        "first": "M",
                        "middle": [],
                        "last": "Swerts",
                        "suffix": ""
                    },
                    {
                        "first": "M",
                        "middle": [],
                        "last": "Theune",
                        "suffix": ""
                    },
                    {
                        "first": "M",
                        "middle": [],
                        "last": "Weegels",
                        "suffix": ""
                    }
                ],
                "year": 1999,
                "venue": "Proc. EUROSPEECH",
                "volume": "",
                "issue": "",
                "pages": "",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "Krahmer, E., Swerts, M., Theune, M., Weegels, M., (1999), Error spotting in human-machine interactions, Proc. EUROSPEECH, Budapest, Hungary.",
                "links": null
            },
            "BIBREF7": {
                "ref_id": "b7",
                "title": "Predicting and adapting to poor speech recognition in a spoken dialogue system",
                "authors": [
                    {
                        "first": "D",
                        "middle": [],
                        "last": "Litman",
                        "suffix": ""
                    },
                    {
                        "first": "S",
                        "middle": [],
                        "last": "Pan",
                        "suffix": ""
                    }
                ],
                "year": 2000,
                "venue": "Proc. 17th AAAI",
                "volume": "",
                "issue": "",
                "pages": "",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "Litman, D., Pan, S. (2000), Predicting and adapting to poor speech recognition in a spoken dialogue system, Proc. 17th AAAI, Austin, TX.",
                "links": null
            },
            "BIBREF8": {
                "ref_id": "b8",
                "title": "Automatic Detection of Poor Speech Recognition at the Dialogue Level",
                "authors": [
                    {
                        "first": "D",
                        "middle": [],
                        "last": "Litman",
                        "suffix": ""
                    },
                    {
                        "first": "M",
                        "middle": [],
                        "last": "Walker",
                        "suffix": ""
                    },
                    {
                        "first": "M",
                        "middle": [],
                        "last": "Kearns",
                        "suffix": ""
                    }
                ],
                "year": 1999,
                "venue": "Proc. ACL'99",
                "volume": "",
                "issue": "",
                "pages": "",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "Litman, D., Walker, M., Kearns, M. (1999), Automatic Detection of Poor Speech Recognition at the Dialogue Level. Proc. ACL'99, College Park, MD.",
                "links": null
            },
            "BIBREF9": {
                "ref_id": "b9",
                "title": "Foundations of Statistical Natural Language Processing",
                "authors": [
                    {
                        "first": "C",
                        "middle": [],
                        "last": "Manning",
                        "suffix": ""
                    },
                    {
                        "first": "H",
                        "middle": [],
                        "last": "Sch\u00fctze",
                        "suffix": ""
                    }
                ],
                "year": 1999,
                "venue": "",
                "volume": "",
                "issue": "",
                "pages": "",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "Manning, C., Sch\u00fctze, H., (1999), Foundations of Statistical Natural Language Processing, The MIT Press, Cambridge, MA.",
                "links": null
            },
            "BIBREF10": {
                "ref_id": "b10",
                "title": "Information Retrieval",
                "authors": [
                    {
                        "first": "C",
                        "middle": [
                            "J"
                        ],
                        "last": "Van Rijsbergen",
                        "suffix": ""
                    }
                ],
                "year": 1979,
                "venue": "",
                "volume": "",
                "issue": "",
                "pages": "",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "van Rijsbergen, C.J. (1979), Information Retrieval, Lon- don: Buttersworth.",
                "links": null
            },
            "BIBREF11": {
                "ref_id": "b11",
                "title": "On the influence of hyperarticulated speech on recognition performance",
                "authors": [
                    {
                        "first": "H",
                        "middle": [],
                        "last": "Soltau",
                        "suffix": ""
                    },
                    {
                        "first": "A",
                        "middle": [],
                        "last": "Waibel",
                        "suffix": ""
                    }
                ],
                "year": 1998,
                "venue": "Proc. ICSLP'98",
                "volume": "",
                "issue": "",
                "pages": "",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "Soltau, H., Waibel, A. (1998), On the influence of hyper- articulated speech on recognition performance, Proc. ICSLP'98, Sydney, Australia",
                "links": null
            },
            "BIBREF12": {
                "ref_id": "b12",
                "title": "Corrections in spoken dialogue systems",
                "authors": [
                    {
                        "first": "M",
                        "middle": [],
                        "last": "Swerts",
                        "suffix": ""
                    },
                    {
                        "first": "D",
                        "middle": [],
                        "last": "Litman",
                        "suffix": ""
                    },
                    {
                        "first": "J",
                        "middle": [],
                        "last": "Hirschberg",
                        "suffix": ""
                    }
                ],
                "year": 2000,
                "venue": "Proc. ICSLP",
                "volume": "",
                "issue": "",
                "pages": "",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "Swerts, M., Litman, D., Hirschberg, J. (2000), Correc- tions in spoken dialogue systems, Proc. ICSLP 2000, Beijing, China.",
                "links": null
            },
            "BIBREF13": {
                "ref_id": "b13",
                "title": "Learning to predict problematic situations in a spoken dialogue system: Experiment with How May I Help You?",
                "authors": [
                    {
                        "first": "M",
                        "middle": [],
                        "last": "Walker",
                        "suffix": ""
                    },
                    {
                        "first": "I",
                        "middle": [],
                        "last": "Langkilde",
                        "suffix": ""
                    },
                    {
                        "first": "J",
                        "middle": [],
                        "last": "Wright",
                        "suffix": ""
                    },
                    {
                        "first": "A",
                        "middle": [],
                        "last": "Gorin",
                        "suffix": ""
                    },
                    {
                        "first": "D",
                        "middle": [],
                        "last": "Litman",
                        "suffix": ""
                    }
                ],
                "year": 2000,
                "venue": "Proc. NAACL",
                "volume": "",
                "issue": "",
                "pages": "",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "Walker, M., Langkilde, I., Wright, J., Gorin, A., Litman, D. (2000a), Learning to predict problematic situations in a spoken dialogue system: Experiment with How May I Help You?, Proc. NAACL, Seattle, WA.",
                "links": null
            },
            "BIBREF14": {
                "ref_id": "b14",
                "title": "Using natural language processing and discourse features to identify understanding errors in a spoken dialogue system",
                "authors": [
                    {
                        "first": "M",
                        "middle": [],
                        "last": "Walker",
                        "suffix": ""
                    },
                    {
                        "first": "J",
                        "middle": [],
                        "last": "Wright",
                        "suffix": ""
                    },
                    {
                        "first": "I",
                        "middle": [],
                        "last": "Langkilde",
                        "suffix": ""
                    }
                ],
                "year": 2000,
                "venue": "Proc. ICML",
                "volume": "",
                "issue": "",
                "pages": "",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "Walker, M., Wright, J. Langkilde, I. (2000b), Using nat- ural language processing and discourse features to identify understanding errors in a spoken dialogue sys- tem, Proc. ICML, Stanford, CA.",
                "links": null
            }
        },
        "ref_entries": {
            "FIGREF0": {
                "num": null,
                "text": "The following labels are used for the system questions. O open questions (\"From where to where do you want to travel?\") I implicit verification (\"When do you want to travel from Tilburg to Schiphol Airport?\") E explicit verification (\"So you want to travel from Tilburg to Schiphol Airport?\") Y yes/no question (\"Do you want me to repeat the connection?\") M Meta-questions (\"Can you please correct me?\")",
                "uris": null,
                "type_str": "figure"
            },
            "FIGREF1": {
                "num": null,
                "text": "174) to combine precision and recall in a single measure. By setting $ equal to 1, precision and recall are given an equal weight, and the measure simplifies to",
                "uris": null,
                "type_str": "figure"
            },
            "FIGREF2": {
                "num": null,
                "text": "the current user utterance will cause a communication problem, (3) predict on the basis of both BoW @ and 6Q @ , whether the current user utterance will cause a problem, (4) predict on the basis of the current word graph BoW @ , whether the previous user utterance, uttered at time @ -1, caused a problem, (5) predict on the basis of the six most recent system questions, whether the previous user utterance caused a problem, (6) predict on the basis of BoW @ and 6Q @ , whether the previous user utterance caused a problem, (7) predict on the basis of the two most recent word graphs, BoW @ -1 and BoW @ , whether the previous user utterance caused a problem, and finally (8) predict on the basis of the two most recent word graphs, BoW @",
                "uris": null,
                "type_str": "figure"
            },
            "FIGREF3": {
                "num": null,
                "text": "RIPPER rule set for predicting whether user utterance @ -1 caused communication problems on the basis of the Bags of Words for @ and @",
                "uris": null,
                "type_str": "figure"
            },
            "TABREF2": {
                "num": null,
                "content": "<table/>",
                "text": "RIPPER results (accuracy, precision, recall, and A B",
                "html": null,
                "type_str": "table"
            }
        }
    }
}