{
"paper_id": "P00-1013",
"header": {
"generated_with": "S2ORC 1.0.0",
"date_generated": "2023-01-19T08:13:32.405185Z"
},
"title": "Spoken Dialogue Management Using Probabilistic Reasoning",
"authors": [
{
"first": "Nicholas",
"middle": [],
"last": "Roy",
"suffix": "",
"affiliation": {
"laboratory": "",
"institution": "Robotics Institute Carnegie Mellon University Pittsburgh",
"location": {
"postCode": "15213",
"region": "PA"
}
},
"email": ""
},
{
"first": "Joelle",
"middle": [],
"last": "Pineau",
"suffix": "",
"affiliation": {
"laboratory": "",
"institution": "Robotics Institute Carnegie Mellon University Pittsburgh",
"location": {
"postCode": "15213",
"region": "PA"
}
},
"email": ""
},
{
"first": "Sebastian",
"middle": [],
"last": "Thrun",
"suffix": "",
"affiliation": {
"laboratory": "",
"institution": "Robotics Institute Carnegie Mellon University Pittsburgh",
"location": {
"postCode": "15213",
"region": "PA"
}
},
"email": ""
}
],
"year": "",
"venue": null,
"identifiers": {},
"abstract": "Spoken dialogue managers have benefited from using stochastic planners such as Markov Decision Processes (MDPs). However, so far, MDPs do not handle well noisy and ambiguous speech utterances. We use a Partially Observable Markov Decision Process (POMDP)-style approach to generate dialogue strategies by inverting the notion of dialogue state; the state represents the user's intentions, rather than the system state. We demonstrate that under the same noisy conditions, a POMDP dialogue manager makes fewer mistakes than an MDP dialogue manager. Furthermore, as the quality of speech recognition degrades, the POMDP dialogue manager automatically adjusts the policy.",
"pdf_parse": {
"paper_id": "P00-1013",
"_pdf_hash": "",
"abstract": [
{
"text": "Spoken dialogue managers have benefited from using stochastic planners such as Markov Decision Processes (MDPs). However, so far, MDPs do not handle well noisy and ambiguous speech utterances. We use a Partially Observable Markov Decision Process (POMDP)-style approach to generate dialogue strategies by inverting the notion of dialogue state; the state represents the user's intentions, rather than the system state. We demonstrate that under the same noisy conditions, a POMDP dialogue manager makes fewer mistakes than an MDP dialogue manager. Furthermore, as the quality of speech recognition degrades, the POMDP dialogue manager automatically adjusts the policy.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Abstract",
"sec_num": null
}
],
"body_text": [
{
"text": "The development of automatic speech recognition has made possible more natural human-computer interaction. Speech recognition and speech understanding, however, are not yet at the point where a computer can reliably extract the intended meaning from every human utterance. Human speech can be both noisy and ambiguous, and many real-world systems must also be speaker-independent. Regardless of these difficulties, any system that manages human-machine dialogues must be able to perform reliably even with noisy and stochastic speech input. Recent research in dialogue management has shown that Markov Decision Processes (MDPs) can be useful for generating effective dialogue strategies (Young, 1990; Levin et al., 1998) ; the system is modelled as a set of states that represent the dialogue as a whole, and a set of actions corresponding to speech productions from the system. The goal is to maximise the reward obtained for fulfilling a user's request. However, the correct way to represent the state of the dialogue is still an open problem (Singh et al., 1999) . A common solution is to restrict the system to a single goal. For example, in booking a flight in an automated travel agent system, the system state is described in terms of how close the agent is to being able to book the flight.",
"cite_spans": [
{
"start": 687,
"end": 700,
"text": "(Young, 1990;",
"ref_id": "BIBREF14"
},
{
"start": 701,
"end": 720,
"text": "Levin et al., 1998)",
"ref_id": "BIBREF4"
},
{
"start": 1045,
"end": 1065,
"text": "(Singh et al., 1999)",
"ref_id": "BIBREF10"
}
],
"ref_spans": [],
"eq_spans": [],
"section": "Introduction",
"sec_num": "1"
},
{
"text": "Such systems suffer from a principal problem. A conventional MDP-based dialogue manager must know the current state of the system at all times, and therefore the state has to be wholly contained in the system representation. These systems perform well under certain conditions, but not all. For example, MDPs have been used successfully for such tasks as retrieving e-mail or making travel arrangements (Walker et al., 1998; Levin et al., 1998) over the phone, task domains that are generally low in both noise and ambiguity. However, the issue of reliability in the face of noise is a major concern for our application. Our dialogue manager was developed for a mobile robot application that has knowledge from several domains, and must interact with many people over time. For speaker-independent systems and systems that must act in a noisy environment, the user's action and intentions cannot always be used to infer the dialogue state; it may be not be possible to reliably and completely determine the state of the dialogue following each utterance. The poor reliability of the audio signal on a mobile robot, coupled with the expectations of natural interaction that people have with more anthropomorphic interfaces, increases the demands placed on the dialogue manager.",
"cite_spans": [
{
"start": 403,
"end": 424,
"text": "(Walker et al., 1998;",
"ref_id": "BIBREF13"
},
{
"start": 425,
"end": 444,
"text": "Levin et al., 1998)",
"ref_id": "BIBREF4"
}
],
"ref_spans": [],
"eq_spans": [],
"section": "Introduction",
"sec_num": "1"
},
{
"text": "Most existing dialogue systems do not model confidences on recognition accuracy of the human utterances, and therefore do not account for the reliability of speech recognition when applying a dialogue strategy. Some systems do use the log-likelihood values for speech utterances, however these values are only thresholded to indicate whether the utterance needs to be confirmed (Niimi and Kobayashi, 1996; Singh et al., 1999) . An important concept lying at the heart of this issue is that of observability -the ultimate goal of a dialogue system is to satisfy a user request; however, what the user really wants is at best partially observable.",
"cite_spans": [
{
"start": 378,
"end": 405,
"text": "(Niimi and Kobayashi, 1996;",
"ref_id": "BIBREF6"
},
{
"start": 406,
"end": 425,
"text": "Singh et al., 1999)",
"ref_id": "BIBREF10"
}
],
"ref_spans": [],
"eq_spans": [],
"section": "Introduction",
"sec_num": "1"
},
{
"text": "We handle the problem of partial observability by inverting the conventional notion of state in a dialogue. The world is viewed as partially unobservable -the underlying state is the intention of the user with respect to the dialogue task. The only observations about the user's state are the speech utterances given by the speech recognition system, from which some knowledge about the current state can be inferred. By accepting the partial observability of the world, the dialogue problem becomes one that is addressed by Partially Observable Markov Decision Processes (POMDPs) (Sondik, 1971) . Finding an optimal policy for a given POMDP model corresponds to defining an optimal dialogue strategy. Optimality is attained within the context of a set of rewards that define the relative value of taking various actions.",
"cite_spans": [
{
"start": 581,
"end": 595,
"text": "(Sondik, 1971)",
"ref_id": "BIBREF11"
}
],
"ref_spans": [],
"eq_spans": [],
"section": "Introduction",
"sec_num": "1"
},
{
"text": "We will show that conventional MDP solutions are insufficient, and that a more robust methodology is required. Note that in the limit of perfect sensing, the POMDP policy will be equivalent to an MDP policy. What the POMDP policy offers is an ability to compensate appropriately for better or worse sensing. As the speech recognition degrades, the POMDP policy acquires reward more slowly, but makes fewer mistakes and blind guesses compared to a conventional MDP policy.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Introduction",
"sec_num": "1"
},
{
"text": "There are several POMDP algorithms that may be the natural choice for policy generation (Sondik, 1971; Monahan, 1982; Parr and Russell, 1995; Cassandra et al., 1997; Kaelbling et al., 1998; Thrun, 1999) . However, solving real world dialogue scenarios is computationally in-tractable for full-blown POMDP solvers, as the complexity is doubly exponential in the number of states. We therefore will use an algorithm for finding approximate solutions to POMDP-style problems and apply it to dialogue management. This algorithm, the Augmented MDP, was developed for mobile robot navigation (Roy and Thrun, 1999) , and operates by augmenting the state description with a compression of the current belief state. By representing the belief state succinctly with its entropy, belief-space planning can be approximated without the expected complexity.",
"cite_spans": [
{
"start": 88,
"end": 102,
"text": "(Sondik, 1971;",
"ref_id": "BIBREF11"
},
{
"start": 103,
"end": 117,
"text": "Monahan, 1982;",
"ref_id": "BIBREF5"
},
{
"start": 118,
"end": 141,
"text": "Parr and Russell, 1995;",
"ref_id": "BIBREF7"
},
{
"start": 142,
"end": 165,
"text": "Cassandra et al., 1997;",
"ref_id": "BIBREF2"
},
{
"start": 166,
"end": 189,
"text": "Kaelbling et al., 1998;",
"ref_id": "BIBREF3"
},
{
"start": 190,
"end": 202,
"text": "Thrun, 1999)",
"ref_id": "BIBREF12"
},
{
"start": 586,
"end": 607,
"text": "(Roy and Thrun, 1999)",
"ref_id": "BIBREF9"
}
],
"ref_spans": [],
"eq_spans": [],
"section": "Introduction",
"sec_num": "1"
},
{
"text": "In the first section of this paper, we develop the model of dialogue interaction. This model allows for a more natural description of dialogue problems, and in particular allows for intuitive handling of noisy and ambiguous dialogues. Few existing dialogues can handle ambiguous input, typically relying on natural language processing to resolve semantic ambiguities (Aust and Ney, 1998) . Secondly, we present a description of an example problem domain, and finally we present experimental results comparing the performance of the POMDP (approximated by the Augmented MDP) to conventional MDP dialogue strategies.",
"cite_spans": [
{
"start": 367,
"end": 387,
"text": "(Aust and Ney, 1998)",
"ref_id": "BIBREF0"
}
],
"ref_spans": [],
"eq_spans": [],
"section": "Introduction",
"sec_num": "1"
},
{
"text": "A Partially Observable Markov Decision Process (POMDP) is a natural way of modelling dialogue processes, especially when the state of the system is viewed as the state of the user. The partial observability capabilities of a POMDP policy allows the dialogue planner to recover from noisy or ambiguous utterances in a natural and autonomous way. At no time does the machine interpreter have any direct knowledge of the state of the user, i.e, what the user wants. The machine interpreter can only infer this state from the user's noisy input. The POMDP framework provides a principled mechanism for modelling uncertainty about what the user is trying to accomplish.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Dialogue Systems and POMDPs",
"sec_num": "2"
},
{
"text": "The POMDP consists of an underlying, unobservable Markov Decision Process. The MDP is specified by:",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Dialogue Systems and POMDPs",
"sec_num": "2"
},
{
"text": "\u2022 a set of states $S = \\{s_1, s_2, \\ldots, s_n\\}$ \u2022 a set of actions $A = \\{a_1, a_2, \\ldots, a_m\\}$ \u2022 a set of transition probabilities $T(s_j \\mid s_i, a) = p(s_j \\mid s_i, a)$ \u2022 a set of rewards $R : S \\times A \\rightarrow \\Re$ \u2022",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Dialogue Systems and POMDPs",
"sec_num": "2"
},
{
"text": "an initial state $s_0$",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Dialogue Systems and POMDPs",
"sec_num": "2"
},
{
"text": "The actions represent the set of responses that the system can carry out. The transition probabilities form a structure over the set of states, connecting the states in a directed graph with arcs between states with non-zero transition probabilities. The rewards define the relative value of accomplishing certain actions when in certain states.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Dialogue Systems and POMDPs",
"sec_num": "2"
},
{
"text": "The POMDP adds: ",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Dialogue Systems and POMDPs",
"sec_num": "2"
},
{
"text": "\u2022 a set of observations $O = \\{o_1, o_2, \\ldots, o_k\\}$ \u2022 a set of observation probabilities $O(o_j \\mid s_i, a) = p(o_j \\mid s_i, a)$",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Dialogue Systems and POMDPs",
"sec_num": "2"
},
{
"text": "with an initial belief $b_0(s) : S \\rightarrow [0, 1]$ \u2022",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Dialogue Systems and POMDPs",
"sec_num": "2"
},
{
"text": "the set of rewards with rewards conditioned on observations as well:",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Dialogue Systems and POMDPs",
"sec_num": "2"
},
{
"text": "$R : S \\times A \\times O \\rightarrow \\Re$",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Dialogue Systems and POMDPs",
"sec_num": "2"
},
{
"text": "The observations consist of a set of keywords which are extracted from the speech utterances. The POMDP plans in belief space; each belief consists of a probability distribution over the set of states, representing the respective probability that the user is in each of these states. The initial belief specified in the model is updated every time the system receives a new observation from the user.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Dialogue Systems and POMDPs",
"sec_num": "2"
},
{
"text": "The POMDP model, as defined above, first goes through a planning phase, during which it finds an optimal strategy, or policy, which describes an optimal mapping of action",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Dialogue Systems and POMDPs",
"sec_num": "2"
},
{
"text": "$a$ to belief $b(s)$, $\\pi(b) \\rightarrow a$",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Dialogue Systems and POMDPs",
"sec_num": "2"
},
{
"text": ", for all possible beliefs. The dialogue manager uses this policy to direct its behaviour during conversations with users. The optimal strategy for a POMDP is one that prescribes action selection that maximises the expected reward. Unfortunately, finding an optimal policy exactly for all but the most trivial POMDP problems is computationally intractable. A near-optimal policy can be computed significantly faster than an exact one, at the expense of a slight reduction in performance. This is often done by imposing restrictions on the policies that can be selected, or by simplifying the belief state and solving for a simplified uncertainty representation.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Dialogue Systems and POMDPs",
"sec_num": "2"
},
{
"text": "In the Augmented MDP approach, the POMDP problem is simplified by noticing that the belief state of the system tends to have a certain structure. The uncertainty that the system has is usually domain-specific and localised. For example, it may be likely that a household robot system can confuse TV channels ('ABC' for 'NBC'), but it is unlikely that the system will confuse a TV channel request for a request to get coffee. By making the localised assumption about the uncertainty, it becomes possible to summarise any given belief vector by a pair consisting of the most likely state, and the entropy of the belief state.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Dialogue Systems and POMDPs",
"sec_num": "2"
},
{
"text": "$\\tilde{b}(s) = \\langle \\operatorname{argmax}_{s \\in S} b(s), H(b(s)) \\rangle$ (1) $H(b(s)) = -\\sum_{s \\in S} b(s) \\log b(s)$ (2)",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Dialogue Systems and POMDPs",
"sec_num": "2"
},
{
"text": "The entropy of the belief state approximates a sufficient statistic for the entire belief state 1 . Given this assumption, we can plan a policy for every possible such $\\langle$state, entropy$\\rangle$ pair, that approximates the POMDP policy for the corresponding belief $b(s)$.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Dialogue Systems and POMDPs",
"sec_num": "2"
},
{
"text": "The system that was used throughout these experiments is based on a mobile robot, Florence Nightingale (Flo), developed as a prototype nursing home assistant. Flo uses the Sphinx II speech recognition system (Ravishankar, 1996) , and the Festival speech synthesis system (Black et al., 1999) . Figure 1 shows a picture of the robot.",
"cite_spans": [
{
"start": 208,
"end": 227,
"text": "(Ravishankar, 1996)",
"ref_id": "BIBREF8"
},
{
"start": 271,
"end": 291,
"text": "(Black et al., 1999)",
"ref_id": "BIBREF1"
}
],
"ref_spans": [
{
"start": 294,
"end": 302,
"text": "Figure 1",
"ref_id": "FIGREF0"
}
],
"eq_spans": [],
"section": "The Example Domain",
"sec_num": "3"
},
{
"text": "Since the robot is a nursing home assistant, we use task domains that are relevant to assisted living in a home environment. Table 1 shows a list of the task domains the user can inquire about (the time, the patient's medication schedule, what is on different TV stations), in addition to a list of robot motion commands. These abilities have all been implemented on Flo. The medication schedule is pre-programmed, the information about the TV schedules is downloaded on request from the web, and the motion commands correspond to pre-selected robot navigation sequences. If we translate these tasks into the framework that we have described, the decision problem has 13 states, and the state transition graph is given in Figure 2 . The different tasks have varying levels of complexity, from simply saying the time, to going through a list of medications. For simplicity, only the maximum-likelihood transitions are shown in Figure 2 . Note that this model is handcrafted. There is ongoing research into learning policies automatically using reinforcement learning (Singh et al., 1999) ; dialogue models could be learned in a similar manner. This example model is simply to illustrate the utility of the POMDP approach.",
"cite_spans": [
{
"start": 1066,
"end": 1086,
"text": "(Singh et al., 1999)",
"ref_id": "BIBREF10"
}
],
"ref_spans": [
{
"start": 125,
"end": 132,
"text": "Table 1",
"ref_id": "TABREF2"
},
{
"start": 722,
"end": 730,
"text": "Figure 2",
"ref_id": null
},
{
"start": 926,
"end": 934,
"text": "Figure 2",
"ref_id": null
}
],
"eq_spans": [],
"section": "The Example Domain",
"sec_num": "3"
},
{
"text": "There are 20 different actions; 10 actions correspond to different abilities of the robot such as going to the kitchen, or giving the time. The remaining 10 actions are clarification or confirmation actions, such as re-confirming the desired TV channel. There are 16 observations that correspond to relevant keywords as well as a nonsense observation. The reward structure gives the most reward for choosing actions that satisfy the user request. These actions then lead back to the beginning state. Most other actions are penalised with an equivalent negative amount. However, the confir-mation/clarification actions are penalised lightly (values close to 0), and the motion commands are penalised heavily if taken from the wrong state, to illustrate the difference between an undesirable action that is merely irritating (i.e., giving an inappropriate response) and an action that can be much more costly (e.g., having the robot leave the room at the wrong time, or travel to the wrong destination). Table 2 shows an example dialogue obtained by having an actual user interact with the system on the robot. The left-most column is the emitted observation from the speech recognition system. The operating conditions of the system are fairly poor, since the microphone is on-board the robot and subject to background noise as well as being located some distance from the user. In the final two lines of the script, the robot chooses the correct action after some confirmation questions, despite the fact that the signal from the speech recogniser is both very noisy and also ambiguous, containing cues both for the \"say hello\" response and for robot motion to the kitchen.",
"cite_spans": [],
"ref_spans": [
{
"start": 1002,
"end": 1009,
"text": "Table 2",
"ref_id": null
}
],
"eq_spans": [],
"section": "The Example Domain",
"sec_num": "3"
},
{
"text": "We compared the performance of the three algorithms (conventional MDP, POMDP approximated by the Augmented MDP, and exact POMDP) over the example domain. The metric used was to look at the total reward accumulated over the course of an extended test. In order to perform this full test, the observations and states from the underlying MDP were generated stochastically from the model and then given to the policy. The action taken by the policy was returned to the model, and the policy was rewarded based on the state-action-observation triplet. The experiments were run for a total of 100 dialogues, where each dialogue is considered to be a cycle of observation-action utterances from the start state request_begun through a sequence of states and back to the start state. The time was normalised by the length of each dialogue cycle.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Experimental Results",
"sec_num": "4"
},
{
"text": "The exact POMDP policy was generated using the Incremental Improvement algorithm (Cassan- Table 2 : An example dialogue. Note that the robot chooses the correct action in the final two exchanges, even though the utterance is both noisy and ambiguous. dra et al., 1997) . The solver was unable to complete a solution for the full state space, so we created a much smaller dialogue model, with only 7 states and 2 task domains: time and weather information. Figure 3 shows the performance of the three algorithms, over the course of 100 dialogues. Notice that the exact POMDP strategy outperformed both the conventional MDP and approximate POMDP; it accumulated the most reward, and did so with the fastest rate of accumulation. The good performance of the exact POMDP is not surprising because it is an optimal solution for this problem, but time to compute this strategy is high: 729 secs, compared with 1.6 msec for the MDP and 719 msec for the Augmented MDP. Figure 4 demonstrates the algorithms on the full dialogue model as given in Figure 2 . Because of the number of states, no exact POMDP solution could be computed for this problem; the POMDP policy is restricted to the approximate solution.",
"cite_spans": [
{
"start": 251,
"end": 268,
"text": "dra et al., 1997)",
"ref_id": null
}
],
"ref_spans": [
{
"start": 90,
"end": 97,
"text": "Table 2",
"ref_id": null
},
{
"start": 456,
"end": 464,
"text": "Figure 3",
"ref_id": "FIGREF2"
},
{
"start": 961,
"end": 969,
"text": "Figure 4",
"ref_id": "FIGREF3"
},
{
"start": 1037,
"end": 1045,
"text": "Figure 2",
"ref_id": null
}
],
"eq_spans": [],
"section": "The Restricted State Space Problem",
"sec_num": "4.1"
},
{
"text": "The POMDP solution clearly outperforms the conventional MDP strategy, as it more than triples the total accumulated reward over the lifetime of the strategies, although at the cost of taking longer to reach the goal state in each dialogue. Table 3 breaks down the numbers in more detail. The average reward for the POMDP is 18.6 per action, which is the maximum reward for most actions, suggesting that the POMDP is taking the right action about 95% of the time. Furthermore, the average reward per dialogue for the POMDP is 230 compared to 49.7 for the conventional MDP, which suggests that the conventional MDP is making a large number of mistakes in each dialogue.",
"cite_spans": [],
"ref_spans": [
{
"start": 240,
"end": 247,
"text": "Table 3",
"ref_id": "TABREF5"
}
],
"eq_spans": [],
"section": "The Full State Space Problem",
"sec_num": "4.2"
},
{
"text": "Finally, the standard deviation for the POMDP is much narrower, suggesting that this algorithm is getting its rewards much more consistently than the conventional MDP.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "The Full State Space Problem",
"sec_num": "4.2"
},
{
"text": "We verified the utility of the POMDP approach by testing the approximating model on human users. The user testing of the robot is still preliminary, and therefore the experiment presented here cannot be considered a rigorous demonstration. However, Table 4 shows some promising results. Again, the POMDP policy is the one provided by the approximating Augmented MDP.",
"cite_spans": [],
"ref_spans": [
{
"start": 249,
"end": 256,
"text": "Table 4",
"ref_id": null
}
],
"eq_spans": [],
"section": "Verification of Models on Users",
"sec_num": "4.3"
},
{
"text": "The experiment consisted of having users interact with the mobile robot under a variety of conditions. The users tested both the POMDP and an implementation of a conventional MDP dialogue manager. Both planners used exactly the same model. The users were presented first with one manager, and then the other, although they were not told which manager was first and the order varied from user to user randomly. The user labelled each action from the system as \"Correct\" (+100 reward), \"OK\" (-1 reward) or \"Wrong\" (-100 reward). The \"OK\" label was used for responses by the robot that were questions (i.e., did not satisfy the user request) but were relevant to the request, e.g., a confirmation of TV channel when a TV channel was requested.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Verification of Models on Users",
"sec_num": "4.3"
},
{
"text": "The system performed differently for the three test subjects, compensating for the speech recognition accuracy which varied significantly between them. In user #2's case, the POMDP manager took longer to satisfy the requests, but in general gained more reward per action. This is because the speech recognition system generally had lower word-accuracy for this user, either because the user had unusual speech patterns, or because the acoustic signal was corrupted by background noise.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Verification of Models on Users",
"sec_num": "4.3"
},
{
"text": "By comparison, user #3's results show that in the limit of good sensing, the POMDP policy approaches the MDP policy. This user had a much higher recognition rate from the speech recogniser, and consequently both the POMDP and conventional MDP acquire rewards at equivalent rates, and satisfied requests at similar rates.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Verification of Models on Users",
"sec_num": "4.3"
},
{
"text": "This paper discusses a novel way to view the dialogue management problem. The domain is represented as the partially observable state of the user, where the observations are speech utterances from the user. The POMDP representation inverts the traditional notion of state in dialogue management, treating the state as unknown, but inferrable from the sequences of observations from the user. Our approach allows us to model observations from the user probabilistically, and in particular we can compensate appropriately for more or less reliable observations from the speech recognition system. In the limit of perfect recognition, we achieve the same performance as a conventional MDP dialogue policy. However, as recognition degrades, we can model the effects of actively gathering information from the user to offset the loss of information in the utterance stream.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Conclusion",
"sec_num": "5"
},
{
"text": "In the past, POMDPs have not been used for dialogue management because of the computational complexity involved in solving anything but trivial problems. We avoid this problem by using an Table 4 : A comparison of the rewards accumulated for the two algorithms using the full model on real users, with results given as mean +/-std. dev.",
"cite_spans": [],
"ref_spans": [
{
"start": 188,
"end": 195,
"text": "Table 4",
"ref_id": null
}
],
"eq_spans": [],
"section": "Conclusion",
"sec_num": "5"
},
{
"text": "augmented MDP state representation for approximating the optimal policy, which allows us to find a solution that quantitatively outperforms the conventional MDP, while dramatically reducing the time to solution compared to an exact POMDP algorithm (linear vs. exponential in the number of states).",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Conclusion",
"sec_num": "5"
},
{
"text": "We have shown experimentally both in simulation and in preliminary user testing that the POMDP solution consistently outperforms the conventional MDP dialogue manager, as a function of erroneous actions during the dialogue. We are able to show with actual users that as the speech recognition performance varies, the dialogue manager is able to compensate appropriately.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Conclusion",
"sec_num": "5"
},
{
"text": "While the results of the POMDP approach to the dialogue system are promising, a number of improvements are needed. The POMDP is overly cautious, refusing to commit to a particular course of action until it is completely certain that it is appropriate. This is reflected in its liberal use of verification questions. This could be avoided by having some non-static reward structure, where information gathering becomes increasingly costly as it progresses.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Conclusion",
"sec_num": "5"
},
{
"text": "The policy is extremely sensitive to the parameters of the model, which are currently set by hand. While learning the parameters from scratch for a full POMDP is probably unnecessary, automatic tuning of the model parameters would definitely add to the utility of the model. For example, the optimality of a policy is strongly dependent on the design of the reward structure. It follows that incorporating a learning component that adapts the reward structure to reflect actual user satisfaction would likely improve performance.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Conclusion",
"sec_num": "5"
},
{
"text": "Although sufficient statistics are usually moments of continuous distributions, our experience has shown that the entropy serves equally well.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "",
"sec_num": null
}
],
"back_matter": [
{
"text": "The authors would like to thank Tom Mitchell for his advice and support of this research. Kevin Lenzo and Mathur Ravishankar made our use of Sphinx possible, answered requests for information and made bug fixes willingly. Tony Cassandra was extremely helpful in distributing his POMDP code to us, and answering promptly any questions we had. The assistance of the Nursebot team is also gratefully acknowledged, including the members from the School of Nursing and the Department of Computer Science Intelligent Systems at the University of Pittsburgh. This research was supported in part by Le Fonds pour la Formation de Chercheurs et l'Aide \u00e0 la Recherche (Fonds FCAR).",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Acknowledgements",
"sec_num": "6"
}
],
"bib_entries": {
"BIBREF0": {
"ref_id": "b0",
"title": "Evaluating dialog systems used in the real world",
"authors": [
{
"first": "Harald",
"middle": [],
"last": "Aust",
"suffix": ""
},
{
"first": "Hermann",
"middle": [],
"last": "Ney",
"suffix": ""
}
],
"year": 1998,
"venue": "Proc. IEEE ICASSP",
"volume": "2",
"issue": "",
"pages": "1053--1056",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Harald Aust and Hermann Ney. 1998. Evaluating di- alog systems used in the real world. In Proc. IEEE ICASSP, volume 2, pages 1053-1056.",
"links": null
},
"BIBREF1": {
"ref_id": "b1",
"title": "The Festival Speech Synthesis System",
"authors": [
{
"first": "A",
"middle": [],
"last": "Black",
"suffix": ""
},
{
"first": "P",
"middle": [],
"last": "Taylor",
"suffix": ""
},
{
"first": "R",
"middle": [],
"last": "Caley",
"suffix": ""
}
],
"year": 1999,
"venue": "",
"volume": "1",
"issue": "",
"pages": "",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "A. Black, P. Taylor, and R. Caley, 1999. The Festival Speech Synthesis System, 1.4 edition.",
"links": null
},
"BIBREF2": {
"ref_id": "b2",
"title": "Incremental pruning: A simple, fast, exact algorithm for partially observable Markov decision processes",
"authors": [
{
"first": "Anthony",
"middle": [],
"last": "Cassandra",
"suffix": ""
},
{
"first": "Michael",
"middle": [
"L"
],
"last": "Littman",
"suffix": ""
},
{
"first": "Nevin",
"middle": [
"L"
],
"last": "Zhang",
"suffix": ""
}
],
"year": 1997,
"venue": "Proc. 13th Ann. Conf. on Uncertainty in Artificial Intelligence (UAI-97)",
"volume": "",
"issue": "",
"pages": "54--61",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Anthony Cassandra, Michael L. Littman, and Nevin L. Zhang. 1997. Incremental pruning: A simple, fast, exact algorithm for partially observable Markov de- cision processes. In Proc. 13th Ann. Conf. on Un- certainty in Artificial Intelligence (UAI-97), pages 54-61, San Francisco, CA.",
"links": null
},
"BIBREF3": {
"ref_id": "b3",
"title": "Planning and acting in partially observable stochastic domains",
"authors": [
{
"first": "Leslie",
"middle": [],
"last": "Pack Kaelbling",
"suffix": ""
},
{
"first": "Michael",
"middle": [
"L"
],
"last": "Littman",
"suffix": ""
},
{
"first": "Anthony",
"middle": [
"R"
],
"last": "Cassandra",
"suffix": ""
}
],
"year": 1998,
"venue": "Artificial Intelligence",
"volume": "101",
"issue": "",
"pages": "99--134",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Leslie Pack Kaelbling, Michael L. Littman, and An- thony R. Cassandra. 1998. Planning and acting in partially observable stochastic domains. Artificial Intelligence, 101:99-134.",
"links": null
},
"BIBREF4": {
"ref_id": "b4",
"title": "Using Markov decision process for learning dialogue strategies",
"authors": [
{
"first": "Esther",
"middle": [],
"last": "Levin",
"suffix": ""
},
{
"first": "Roberto",
"middle": [],
"last": "Pieraccini",
"suffix": ""
},
{
"first": "Wieland",
"middle": [],
"last": "Eckert",
"suffix": ""
}
],
"year": 1998,
"venue": "Proc. International Conference on Acoustics, Speech and Signal Processing",
"volume": "",
"issue": "",
"pages": "",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Esther Levin, Roberto Pieraccini, and Wieland Eckert. 1998. Using Markov decision process for learning dialogue strategies. In Proc. International Confer- ence on Acoustics, Speech and Signal Processing (ICASSP).",
"links": null
},
"BIBREF5": {
"ref_id": "b5",
"title": "A survey of partially observable Markov decision processes",
"authors": [
{
"first": "George",
"middle": [
"E"
],
"last": "Monahan",
"suffix": ""
}
],
"year": 1982,
"venue": "Management Science",
"volume": "28",
"issue": "1",
"pages": "1--16",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "George E. Monahan. 1982. A survey of partially ob- servable Markov decision processes. Management Science, 28(1):1-16.",
"links": null
},
"BIBREF6": {
"ref_id": "b6",
"title": "Dialog control strategy based on the reliability of speech recognition",
"authors": [
{
"first": "Yasuhisa",
"middle": [],
"last": "Niimi",
"suffix": ""
},
{
"first": "Yutaka",
"middle": [],
"last": "Kobayashi",
"suffix": ""
}
],
"year": 1996,
"venue": "Proc. International Conference on Spoken Language Processing (ICSLP)",
"volume": "",
"issue": "",
"pages": "",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Yasuhisa Niimi and Yutaka Kobayashi. 1996. Dialog control strategy based on the reliability of speech recognition. In Proc. International Conference on Spoken Language Processing (ICSLP).",
"links": null
},
"BIBREF7": {
"ref_id": "b7",
"title": "Approximating optimal policies for partially observable stochastic domains",
"authors": [
{
"first": "Ronald",
"middle": [],
"last": "Parr",
"suffix": ""
},
{
"first": "Stuart",
"middle": [],
"last": "Russell",
"suffix": ""
}
],
"year": 1995,
"venue": "Proceedings of the 14th International Joint Conferences on Artificial Intelligence",
"volume": "",
"issue": "",
"pages": "",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Ronald Parr and Stuart Russell. 1995. Approximating optimal policies for partially observable stochastic domains. In Proceedings of the 14th International Joint Conferences on Artificial Intelligence.",
"links": null
},
"BIBREF8": {
"ref_id": "b8",
"title": "Efficient Algorithms for Speech Recognition",
"authors": [
{
"first": "M",
"middle": [],
"last": "Ravishankar",
"suffix": ""
}
],
"year": 1996,
"venue": "",
"volume": "",
"issue": "",
"pages": "",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "M. Ravishankar. 1996. Efficient Algorithms for Speech Recognition. Ph.D. thesis, Carnegie Mel- lon.",
"links": null
},
"BIBREF9": {
"ref_id": "b9",
"title": "Coastal navigation with mobile robots",
"authors": [
{
"first": "Nicholas",
"middle": [],
"last": "Roy",
"suffix": ""
},
{
"first": "Sebastian",
"middle": [],
"last": "Thrun",
"suffix": ""
}
],
"year": 1999,
"venue": "Advances in Neural Processing Systems",
"volume": "12",
"issue": "",
"pages": "",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Nicholas Roy and Sebastian Thrun. 1999. Coastal navigation with mobile robots. In Advances in Neu- ral Processing Systems, volume 12.",
"links": null
},
"BIBREF10": {
"ref_id": "b10",
"title": "Reinforcement learning for spoken dialog systems",
"authors": [
{
"first": "Satinder",
"middle": [],
"last": "Singh",
"suffix": ""
},
{
"first": "Michael",
"middle": [],
"last": "Kearns",
"suffix": ""
},
{
"first": "Diane",
"middle": [],
"last": "Litman",
"suffix": ""
},
{
"first": "Marilyn",
"middle": [],
"last": "Walker",
"suffix": ""
}
],
"year": 1999,
"venue": "Advances in Neural Processing Systems",
"volume": "12",
"issue": "",
"pages": "",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Satinder Singh, Michael Kearns, Diane Litman, and Marilyn Walker. 1999. Reinforcement learning for spoken dialog systems. In Advances in Neural Pro- cessing Systems, volume 12.",
"links": null
},
"BIBREF11": {
"ref_id": "b11",
"title": "The Optimal Control of Partially Observable Markov Decision Processes",
"authors": [
{
"first": "E",
"middle": [],
"last": "Sondik",
"suffix": ""
}
],
"year": 1971,
"venue": "",
"volume": "",
"issue": "",
"pages": "",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "E. Sondik. 1971. The Optimal Control of Partially Observable Markov Decision Processes. Ph.D. the- sis, Stanford University, Stanford, California.",
"links": null
},
"BIBREF12": {
"ref_id": "b12",
"title": "Monte Carlo POMDPs",
"authors": [
{
"first": "Sebastian",
"middle": [],
"last": "Thrun",
"suffix": ""
}
],
"year": 1999,
"venue": "Advances in Neural Processing Systems",
"volume": "12",
"issue": "",
"pages": "",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Sebastian Thrun. 1999. Monte carlo pomdps. In S. A. Solla, T. K. Leen, and K. R. M\u00fcller, editors, Ad- vances in Neural Processing Systems, volume 12.",
"links": null
},
"BIBREF13": {
"ref_id": "b13",
"title": "Learning optimal dialogue strategies: a case study of a spoken dialogue agent for email",
"authors": [
{
"first": "Marilyn",
"middle": [
"A"
],
"last": "Walker",
"suffix": ""
},
{
"first": "Jeanne",
"middle": [
"C"
],
"last": "Fromer",
"suffix": ""
},
{
"first": "Shrikanth",
"middle": [],
"last": "Narayanan",
"suffix": ""
}
],
"year": 1998,
"venue": "Proc. ACL/COLING'98",
"volume": "",
"issue": "",
"pages": "",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Marilyn A. Walker, Jeanne C. Fromer, and Shrikanth Narayanan. 1998. Learning optimal dialogue strategies: a case study of a spoken dialogue agent for email. In Proc. ACL/COLING'98.",
"links": null
},
"BIBREF14": {
"ref_id": "b14",
"title": "Use of dialogue, pragmatics and semantics to enhance speech recognition",
"authors": [
{
"first": "Sheryl",
"middle": [],
"last": "Young",
"suffix": ""
}
],
"year": 1990,
"venue": "Speech Communication",
"volume": "9",
"issue": "",
"pages": "5--6",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Sheryl Young. 1990. Use of dialogue, pragmatics and semantics to enhance speech recognition. Speech Communication, 9(5-6), Dec.",
"links": null
}
},
"ref_entries": {
"FIGREF0": {
"text": "Florence Nightingale, the prototype nursing home robot used in these experiments.",
"type_str": "figure",
"num": null,
"uris": null
},
"FIGREF1": {
"text": "Time Medication (Medication 1, Medication 2, ..., Medication n) TV Schedules for different channels (ABC, NBC, CBS) Robot Motion Commands (To the kitchen, To the Bedroom)",
"type_str": "figure",
"num": null,
"uris": null
},
"FIGREF2": {
"text": "A comparison of the reward gained over time for the exact POMDP, POMDP approximated by the Augmented MDP, and the conventional MDP for the 7 state problem. In this case, the time is measured in dialogues, or iterations of satisfying user requests.",
"type_str": "figure",
"num": null,
"uris": null
},
"FIGREF3": {
"text": "A comparison of the reward gained over time for the approximate POMDP vs. the conventional MDP for the 13 state problem. Again, the time is measured in number of actions.",
"type_str": "figure",
"num": null,
"uris": null
},
"TABREF2": {
"type_str": "table",
"content": "<table/>",
"html": null,
"text": "The task domains for Flo.",
"num": null
},
"TABREF5": {
"type_str": "table",
"content": "<table><tr><td/><td>POMDP</td><td>Conventional MDP</td></tr><tr><td>User 1 Reward Per Action</td><td>52.2</td><td>24.8</td></tr><tr><td>Errors per request</td><td>0.1 +/-0.09</td><td>0.55 +/-0.44</td></tr><tr><td>Time to fill request</td><td>1.9 +/-0.47</td><td>2.0 +/-1.51</td></tr><tr><td>User 2 Reward Per Action</td><td>36.95</td><td>6.19</td></tr><tr><td>Errors per request</td><td>0.1 +/-0.09</td><td>0.825 +/-1.56</td></tr><tr><td>Time to fill request</td><td>2.5 +/-1.22</td><td>1.86 +/-1.47</td></tr><tr><td>User 3 Reward Per Action</td><td>49.72</td><td>44.95</td></tr><tr><td>Errors per request</td><td>0.18 +/-0.15</td><td>0.36 +/-0.37</td></tr><tr><td>Time to fill request</td><td>1.63 +/-1.15</td><td>1.42 +/-0.63</td></tr></table>",
"html": null,
"text": "A comparison of the rewards accumulated for the two algorithms (approximate POMDP and conventional MDP) using the full model.",
"num": null
}
}
}
} |