{
"paper_id": "P01-1037",
"header": {
"generated_with": "S2ORC 1.0.0",
"date_generated": "2023-01-19T09:29:38.879257Z"
},
"title": "The Role of Lexico-Semantic Feedback in Open-Domain Textual Question-Answering",
"authors": [
{
"first": "Sanda",
"middle": [],
"last": "Harabagiu",
"suffix": "",
"affiliation": {
"laboratory": "",
"institution": "Southern Methodist University Dallas",
"location": {
"postCode": "75275-0122",
"region": "TX"
}
},
      "email": "sanda@engr.smu.edu"
},
{
"first": "Dan",
"middle": [],
"last": "Moldovan",
"suffix": "",
"affiliation": {
"laboratory": "",
"institution": "Southern Methodist University Dallas",
"location": {
"postCode": "75275-0122",
"region": "TX"
}
},
"email": ""
},
{
"first": "Marius",
"middle": [],
"last": "Pa\u015fca",
"suffix": "",
"affiliation": {
"laboratory": "",
"institution": "Southern Methodist University Dallas",
"location": {
"postCode": "75275-0122",
"region": "TX"
}
},
"email": ""
},
{
"first": "Rada",
"middle": [],
"last": "Mihalcea",
"suffix": "",
"affiliation": {
"laboratory": "",
"institution": "Southern Methodist University Dallas",
"location": {
"postCode": "75275-0122",
"region": "TX"
}
},
"email": ""
},
{
"first": "Mihai",
"middle": [],
"last": "Surdeanu",
"suffix": "",
"affiliation": {
"laboratory": "",
"institution": "Southern Methodist University Dallas",
"location": {
"postCode": "75275-0122",
"region": "TX"
}
},
"email": ""
},
{
      "first": "R\u0103zvan",
"middle": [],
"last": "Bunescu",
"suffix": "",
"affiliation": {
"laboratory": "",
"institution": "Southern Methodist University Dallas",
"location": {
"postCode": "75275-0122",
"region": "TX"
}
},
"email": ""
},
{
"first": "Roxana",
"middle": [],
"last": "G\u00eerju",
"suffix": "",
"affiliation": {
"laboratory": "",
"institution": "Southern Methodist University Dallas",
"location": {
"postCode": "75275-0122",
"region": "TX"
}
},
"email": ""
},
{
"first": "Vasile",
"middle": [],
"last": "Rus",
"suffix": "",
"affiliation": {
"laboratory": "",
"institution": "Southern Methodist University Dallas",
"location": {
"postCode": "75275-0122",
"region": "TX"
}
},
"email": ""
},
{
"first": "Paul",
"middle": [],
      "last": "Mor\u0103rescu",
"suffix": "",
"affiliation": {
"laboratory": "",
"institution": "Southern Methodist University Dallas",
"location": {
"postCode": "75275-0122",
"region": "TX"
}
},
"email": ""
}
],
"year": "",
"venue": null,
"identifiers": {},
"abstract": "This paper presents an open-domain textual Question-Answering system that uses several feedback loops to enhance its performance. These feedback loops combine in a new way statistical results with syntactic, semantic or pragmatic information derived from texts and lexical databases. The paper presents the contribution of each feedback loop to the overall performance of 76% human-assessed precise answers.",
"pdf_parse": {
"paper_id": "P01-1037",
"_pdf_hash": "",
"abstract": [
{
"text": "This paper presents an open-domain textual Question-Answering system that uses several feedback loops to enhance its performance. These feedback loops combine in a new way statistical results with syntactic, semantic or pragmatic information derived from texts and lexical databases. The paper presents the contribution of each feedback loop to the overall performance of 76% human-assessed precise answers.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Abstract",
"sec_num": null
}
],
"body_text": [
{
"text": "Open-domain textual Question-Answering (Q&A), as defined by the TREC competitions 1 , is the task of identifying in large collections of documents a text snippet where the answer to a natural language question lies. The answer is constrained to be found either in a short (50 bytes) or a long (250 bytes) text span. Frequently, keywords extracted from the natural language question are either within the text span or in its immediate vicinity, forming a text paragraph. Since such paragraphs must be identified throughout voluminous collections, automatic and autonomous Q&A systems incorporate an index of the collection as well as a paragraph retrieval mechanism.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Introduction",
"sec_num": "1"
},
{
"text": "Recent results from the TREC evaluations ( (Kwok et al., 2000) (Radev et al., 2000) (Allen et al., 2000) ) show that Information Retrieval (IR) techniques alone are not sufficient for finding answers with high precision. In fact, more and more systems adopt architectures in which the semantics of the questions are captured prior to paragraph retrieval (e.g. (Gaizauskas and Humphreys, 2000) ) and used later in extracting the answer (cf. (Abney et al., 2000) ). When processing a natural language question two goals must be achieved. First we need to know what is the expected answer type; in other words, we need to know what we are looking for. Second, we need to know where to look for the answer, e.g. we must identify the question keywords to be used in the paragraph retrieval.",
"cite_spans": [
{
"start": 43,
"end": 62,
"text": "(Kwok et al., 2000)",
"ref_id": "BIBREF10"
},
{
"start": 63,
"end": 83,
"text": "(Radev et al., 2000)",
"ref_id": "BIBREF14"
},
{
"start": 84,
"end": 104,
"text": "(Allen et al., 2000)",
"ref_id": "BIBREF1"
},
{
"start": 360,
"end": 392,
"text": "(Gaizauskas and Humphreys, 2000)",
"ref_id": null
},
{
"start": 440,
"end": 460,
"text": "(Abney et al., 2000)",
"ref_id": "BIBREF0"
}
],
"ref_spans": [],
"eq_spans": [],
"section": "Introduction",
"sec_num": "1"
},
{
"text": "The expected answer type is determined based on the question stem, e.g. who, where or how much and eventually one of the question concepts, when the stem is ambiguous (for example what), as described in (Radev et al., 2000) (Srihari and Li, 2000) . However finding question keywords that retrieve all candidate answers cannot be achieved only by deriving some of the words used in the question. Frequently, question reformulations use different words, but imply the same answer. Moreover, many equivalent answers are phrased differently. In this paper we argue that the answer to complex natural language questions cannot be extracted with significant precision from large collections of texts unless several lexico-semantic feedback loops are allowed.",
"cite_spans": [
{
"start": 203,
"end": 223,
"text": "(Radev et al., 2000)",
"ref_id": "BIBREF14"
},
{
"start": 224,
"end": 246,
"text": "(Srihari and Li, 2000)",
"ref_id": "BIBREF15"
}
],
"ref_spans": [],
"eq_spans": [],
"section": "Introduction",
"sec_num": "1"
},
{
"text": "In Section 2 we survey the related work whereas in Section 3 we describe the feedback loops that refine the search for correct answers.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Introduction",
"sec_num": "1"
},
{
"text": "Section 4 presents the approach of devising keyword alternations whereas Section 5 details the recognition of question reformulations. Section 6 evaluates the results of the Q&A system and Section 7 summarizes the conclusions.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Introduction",
"sec_num": "1"
},
{
"text": "Mechanisms for open-domain textual Q&A were not discovered in the vacuum. The 90s witnessed a constant improvement of IR systems, determined by the availability of large collections of texts and the TREC evaluations. In parallel, Information Extraction (IE) techniques were developed under the TIPSTER Message Understanding Conference (MUC) competitions. Typically, IE systems identify information of interest in a text and map it to a predefined, target representation, known as template. Although simple combinations of IR and IE techniques are not practical solutions for open-domain textual Q&A because IE systems are based on domain-specific knowledge, their contribution to current open-domain Q&A methods is significant. For example, stateof-the-art Named Entity (NE) recognizers developed for IE systems were readily available to be incorporated in Q&A systems and helped recognize names of people, organizations, locations or dates.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Related work",
"sec_num": "2"
},
{
"text": "Assuming that it is very likely that the answer is a named entity, (Srihari and Li, 2000) describes a NE-supported Q&A system that functions quite well when the expected answer type is one of the categories covered by the NE recognizer. Unfortunately this system is not fully autonomous, as it depends on IR results provided by external search engines. Answer extractions based on NE recognizers were also developed in the Q&A presented in (Abney et al., 2000) (Radev et al., 2000) (Gaizauskas and Humphreys, 2000) . As noted in (Voorhees and Tice, 2000) , Q&A systems that did not include NE recognizers performed poorly in the TREC evaluations, especially in the short answer category. Some Q&A systems, like (Moldovan et al., 2000) relied both on NE recognizers and some empirical indicators.",
"cite_spans": [
{
"start": 67,
"end": 89,
"text": "(Srihari and Li, 2000)",
"ref_id": "BIBREF15"
},
{
"start": 440,
"end": 460,
"text": "(Abney et al., 2000)",
"ref_id": "BIBREF0"
},
{
"start": 461,
"end": 481,
"text": "(Radev et al., 2000)",
"ref_id": "BIBREF14"
},
{
"start": 482,
"end": 514,
"text": "(Gaizauskas and Humphreys, 2000)",
"ref_id": null
},
{
"start": 529,
"end": 554,
"text": "(Voorhees and Tice, 2000)",
"ref_id": "BIBREF16"
},
{
"start": 711,
"end": 734,
"text": "(Moldovan et al., 2000)",
"ref_id": "BIBREF13"
}
],
"ref_spans": [],
"eq_spans": [],
"section": "Related work",
"sec_num": "2"
},
{
"text": "However, the answer does not always belong to a category covered by the NE recognizer. For such cases several approaches have been developed. The first one, presented in , the answer type is derived from a large answer taxonomy. A different approach, based on statistical techniques was proposed in (Radev et al., 2000) . (Cardie et al., 2000) presents a method of extracting answers as noun phrases in a novel way. Answer extraction based on grammatical information is also promoted by the system described in (Clarke et al., 2000) .",
"cite_spans": [
{
"start": 299,
"end": 319,
"text": "(Radev et al., 2000)",
"ref_id": "BIBREF14"
},
{
"start": 322,
"end": 343,
"text": "(Cardie et al., 2000)",
"ref_id": "BIBREF2"
},
{
"start": 511,
"end": 532,
"text": "(Clarke et al., 2000)",
"ref_id": "BIBREF3"
}
],
"ref_spans": [],
"eq_spans": [],
"section": "Related work",
"sec_num": "2"
},
{
"text": "One of the few Q&A systems that takes into account morphological, lexical and semantic alternations of terms is described in (Ferret et al., 2000) . To our knowledge, none of the current open-domain Q&A systems use any feedback loops to generate lexico-semantic alternations. This paper shows that such feedback loops enhance significantly the performance of opendomain textual Q&A systems.",
"cite_spans": [
{
"start": 125,
"end": 146,
"text": "(Ferret et al., 2000)",
"ref_id": "BIBREF5"
}
],
"ref_spans": [],
"eq_spans": [],
"section": "Related work",
"sec_num": "2"
},
{
"text": "Before initiating the search for the answer to a natural language question we take into account the fact that it is very likely that the same question or a very similar one has been posed to the system before, and thus those results can be used again. To find such cached questions, we measure the similarity to the previously processed questions and when a reformulation is identified, the system returns the corresponding cached correct answer, as illustrated in Figure 1 .",
"cite_spans": [],
"ref_spans": [
{
"start": 465,
"end": 473,
"text": "Figure 1",
"ref_id": "FIGREF1"
}
],
"eq_spans": [],
"section": "Textual Q&A Feedback Loops",
"sec_num": "3"
},
{
"text": "When no reformulations are detected, the search for answers is based on the conjecture that the eventual answer is likely to be found in a text paragraph that (a) contains the most representative question concepts and (b) includes a textual concept of the same category as the expected answer. Since the current retrieval technology does not model semantic knowledge, we break down this search into a boolean retrieval, based on some question keywords and a filtering mechanism, that retains only those passages containing the expected answer type. Both the question keywords and the expected answer type are identified by using the dependencies derived from the question parse.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Textual Q&A Feedback Loops",
"sec_num": "3"
},
{
"text": "By implementing our own version of the publicly available Collins parser (Collins, 1996) , we also learned a dependency model that enables the mapping of parse trees into sets of binary relations between the head-word of each constituent and its sibling-words. For example, the parse tree of TREC-9 question Q210: \"How many dogs pull a sled in the Iditarod ?\" is: For each possible constituent in a parse tree, rules first described in (Magerman, 1995) and (Jelinek et al., 1994 ) identify the head-child and propagate the head-word to its parent. For the parse of question Q210 the propagation is:",
"cite_spans": [
{
"start": 73,
"end": 88,
"text": "(Collins, 1996)",
"ref_id": "BIBREF4"
},
{
"start": 436,
"end": 452,
"text": "(Magerman, 1995)",
"ref_id": "BIBREF11"
},
{
"start": 457,
"end": 478,
"text": "(Jelinek et al., 1994",
"ref_id": "BIBREF9"
}
],
"ref_spans": [],
"eq_spans": [],
"section": "Textual Q&A Feedback Loops",
"sec_num": "3"
},
{
"text": "NP (sled) DT NN DT IN many How WRB dogs NNS JJ NP (dogs) VBP pull a sled in the Iditarod NNP (Iditarod) NP (Iditarod) PP (Iditarod) NP (sled) VP (pull) S (pull)",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Textual Q&A Feedback Loops",
"sec_num": "3"
},
{
"text": "When the propagation is over, head-modifier relations are extracted, generating the following dependency structure, called question semantic form in .",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Textual Q&A Feedback Loops",
"sec_num": "3"
},
{
"text": "dogs Iditarod COUNT pull sled",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Textual Q&A Feedback Loops",
"sec_num": "3"
},
{
"text": "In the structure above, COUNT represents the expected answer type, replacing the question stem \"how many\". Few question stems are unambiguous (e.g. who, when). If the question stem is ambiguous, the expected answer type is determined by the concept from the question semantic form that modifies the stem. This concept is searched in an ANSWER TAXONOMY comprising several tops linked to a significant number of WordNet noun and verb hierarchies. Each top represents one of the possible expected answer types implemented in our system (e.g. PERSON, PRODUCT, NUMERICAL VALUE, COUNT, LOCATION). We encoded a total of 38 possible answer types.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Textual Q&A Feedback Loops",
"sec_num": "3"
},
{
"text": "In addition, the question keywords used for paragraph retrieval are also derived from the question semantic form. The question keywords are organized in an ordered list which first enumer-ates the named entities and the question quotations, then the concepts that triggered the recognition of the expected answer type followed by all adjuncts, in a left-to-right order, and finally the question head. The conjunction of the keywords represents the boolean query applied to the document index. (Moldovan et al., 2000) It is well known that one of the disadvantages of boolean retrieval is that it returns either too many or too few documents. However, for question answering, this is an advantage, exploited by the first feedback loop represented in Figure 1 . Feedback loop 1 is triggered when the number of retrieved paragraphs is either smaller than a minimal value or larger than a maximal value determined beforehand for each answer type. Alternatively, when the number of paragraphs is within limits, those paragraphs that do not contain at least one concept of the same semantic category as the expected answer type are filtered out. The remaining paragraphs are parsed and their dependency structures, called answer semantic forms, are derived. Figure 1 is activated when the question semantic form and the answer semantic form cannot by unified. The unification involves three steps:",
"cite_spans": [
{
"start": 493,
"end": 516,
"text": "(Moldovan et al., 2000)",
"ref_id": "BIBREF13"
}
],
"ref_spans": [
{
"start": 749,
"end": 757,
"text": "Figure 1",
"ref_id": "FIGREF1"
},
{
"start": 1252,
"end": 1260,
"text": "Figure 1",
"ref_id": "FIGREF1"
}
],
"eq_spans": [],
"section": "Textual Q&A Feedback Loops",
"sec_num": "3"
},
{
      "text": "Step 1: The recognition of the expected answer type. The first step marks all possible concepts that are answer candidates. For example, in the case of TREC-9 question Q243: \"Where did the ukulele originate ?\", the expected answer type is LOCATION. The paragraph \"the ukulele introduced from Portugal into the Hawaiian islands\" contains two named entities of the category LO-CATION and both are marked accordingly.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "%",
"sec_num": null
},
{
"text": "Step 2: The identification of the question concepts. The second step identifies the question words, their synonyms, morphological derivations or WordNet hypernyms in the answer semantic form.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "%",
"sec_num": null
},
{
"text": "Step 3: The assessment of the similarities of dependencies. In the third step, two classes of similar dependencies are considered, generating unifications of the question and answer semantic forms: & Class L2-1: there is a one-to-one mapping between the binary dependencies of the question and binary dependencies from the answer semantic form. Moreover, these dependencies largely cover the question semantic form 2 . An example is: We find an entailment between producing, or making and selling goods, derived from Word-Net, since synset ' make, produce, create( has the genus manufacture, defined in the gloss of its homomorphic nominalization as \"for sale\". Therefore the semantic form of question Q261 and its illustrated answer are similar. Nouns head and government are constituents of a possible paraphrase of president, i.e. \"head of government\". However, only world knowledge can justify the answer, since there are countries where the prime minister is the head of government. Presupposing this inference, the semantic form of the question and answer are similar. Feedback loop 3 from Figure 1 brings forward additional semantic information. Two classes of similar dependencies are considered for the abduction of answers, performed in a manner similar to the justifications described in . ",
"cite_spans": [],
"ref_spans": [
{
"start": 1096,
"end": 1104,
"text": "Figure 1",
"ref_id": "FIGREF1"
}
],
"eq_spans": [],
"section": "%",
"sec_num": null
},
{
"text": "Class L3-2: Paraphrases and additional information produce significant differences between the question semantic form and the answer semantic form. However, semantic information contributes to the normalization of the answer dependencies until they can be unified with the question dependencies. For example, if (a) a volcano IS-A mountain; (b) lava IS-PART of volcano, and moreover it is a part coming from the inside; and (c) fragments of lava have all the properties of lava, the following question semantic form and answer semantic form can be unified: The semantic information and the world knowledge needed for the above unifications are available from WordNet (Miller, 1995) . Moreover, this knowledge can be translated in axiomatic form and used for abductive proofs. Each of the feedback loops provide the retrieval engine with new alternations of the question keywords. Feedback loop 2 considers morphological and lexical alternations whereas Feedback loop 3 uses semantic alternations. The method of generating the alternations is detailed in Section 4.",
"cite_spans": [
{
"start": 667,
"end": 681,
"text": "(Miller, 1995)",
"ref_id": "BIBREF12"
}
],
"ref_spans": [],
"eq_spans": [],
"section": "&",
"sec_num": null
},
{
"text": "To enhance the chance of finding the answer to a question, each feedback loop provides with a different set of keyword alternations. Such alternations can be classified according to the linguistic knowledge they are based upon: 1.Morphological Alternations.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Keyword Alternations",
"sec_num": "4"
},
{
"text": "When lexical alternations are necessary because no answer was found yet, the first keyword that is altered is determined by the question word that either prompted the expected answer type or is in the same semantic class with the expected answer type. For example, in the case of question Q209: \"Who invented the paper clip ?\", the expected answer type is PERSON and so is the subject of the verb invented , lexicalized as the nominalization inventor. Moreover, since our retrieval mechanism does not stem keywords, all the inflections of the verb are also considered. Therefore, the initial query is expanded into: WordNet encodes a wealth of semantic information that is easily mined. Seven types of semantic relations span concepts, enabling the retrieval of synonyms and other semantically related terms. Such alternations improve the recall of the answer paragraphs. For example, in the case of question Q221: \"Who killed Martin Luther King ?\", by considering the synonym of killer, the noun assassin, the Q&A system retrieved paragraphs with the correct answer. Similarly, for the question Q206: \"How far is the moon ?\", since the adverb far is encoded in WordNet as being an attribute of distance, by adding this noun to the retrieval keywords, a correct answer is found. 3. Semantic Alternations and Paraphrases. We define as semantic alternations of a keyword those words or collocations from WordNet that (a) are not members of any WordNet synsets containing the original keyword; and (b) have a chain of WordNet relations or bigram relations that connect it to the original keyword. These relations can be translated in axiomatic form and thus participate to the abductive backchaining from the answer to the question -to justify the answer. For example semantic alternations involving only WordNet relations were used in the case of question Q258: \"Where do lobsters like to live ?\". 
Since in WordNet the verb prefer has verb like as a hypernym, and moreover, its glossed definition is liking better, the query becomes: QUERY(Q258):) lobsters AND (like OR prefer) AND live 0 Sometimes multiple keywords are replaced by a semantic alternation. Sometimes these alternations are similar to the relations between multiterm paraphrases and single terms, other time they simply are semantically related terms. In the case of question Q210: \"How many dogs pull a sled in the Iditarod ?\", since the definition of Word-Net sense 2 of noun harness contains the bigram \"pull cart\" and both sled and cart are forms of vehicles, the alternation of the pair of keywords 1 pull, slide2 is rendered by harness. Only when this feedback is received, the paragraph containing the correct answer is retrieved.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Keyword Alternations",
"sec_num": "4"
},
{
"text": "To decide which keywords should be expanded and what form of alternations should be used we rely on a set of heuristics which complement the heuristics that select the question keywords and generate the queries (as described in (Moldovan et al., 2000) ): Heuristic 1: Whenever the first feedback loop requires the addition of the main verb of the question as a query keyword, generate all verb conjugations as well as its nominalizations. Heuristic 2: Whenever the second feedback loop requires lexical alternations, collect from Word-Net all the synset elements of the direct hypernyms and direct hyponyms of verbs and nominalizations that are used in the query. If multiple verbs are used, expand them in a left-to-right order. Heuristic 3: Whenever the third feedback loop imposes semantic alternations expressed as paraphrases, if a verb and its direct object from the question are selected as query keywords, search for other verb-object pairs semantically related to the query pair. When new pairs are located in the glosses of a synset 3 , expand the query verbobject pair with all the elements from 3 . Another set of possible alternations, defined by the existence of lexical relations between pairs of words from different question are used to detect question reformulations. The advantage of these different forms of alternations is that they enable the resolution of similar questions through answer caching instead of normal Q&A processing.",
"cite_spans": [
{
"start": 228,
"end": 251,
"text": "(Moldovan et al., 2000)",
"ref_id": "BIBREF13"
}
],
"ref_spans": [],
"eq_spans": [],
"section": "Keyword Alternations",
"sec_num": "4"
},
{
"text": "In TREC-9 243 questions were reformulations of 54 inquiries, thus asking for the same answer. The reformulation classes contained variable number of questions, ranging from two to eight questions. Two examples of reformulation classes are listed in Table 1 . To classify questions in reformulation groups, we used the algorithm: In Figure 2 we represent the similarity matrix for six questions that were successively posed to the answer engine. Since question reformulations are transitive relations, if at a step ",
"cite_spans": [],
"ref_spans": [
{
"start": 249,
"end": 256,
"text": "Table 1",
"ref_id": "TABREF7"
},
{
"start": 332,
"end": 340,
"text": "Figure 2",
"ref_id": null
}
],
"eq_spans": [],
"section": "Question Reformulations",
"sec_num": "5"
},
{
"text": ". Figure 2 illustrates the transitive closures for reformulations at each of the five steps from the succession of six questions. To be noted that at step 4 no new similarities were found , thus Step 2: {Q1, Q2} {Q3}",
"cite_spans": [],
"ref_spans": [
{
"start": 2,
"end": 10,
"text": "Figure 2",
"ref_id": null
}
],
"eq_spans": [],
"section": "E",
"sec_num": null
},
{
"text": "Step 3: {Q1, Q2, Q4} {Q3}",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "E",
"sec_num": null
},
{
"text": "Step 1: {Q1, Q2}",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "E",
"sec_num": null
},
{
"text": "Step 5: {Q1, Q2, Q4, Q5, Q6} {Q3} Figure 2 : Building reformulation classes with a similarity matrix.",
"cite_spans": [],
"ref_spans": [
{
"start": 34,
"end": 42,
"text": "Figure 2",
"ref_id": null
}
],
"eq_spans": [],
"section": "E",
"sec_num": null
},
{
"text": "The algorithm that measures the similarity between two questions is:",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "E",
"sec_num": null
},
{
"text": "Algorithm Similarity(Q, Q') Input: a pair of question represented as two word strings: Q:",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "E",
"sec_num": null
},
{
"text": "R T S \u00a7 R V U V W X W X W Y R",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "E",
"sec_num": null
},
{
"text": "and Q':",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "E",
"sec_num": null
},
{
"text": "R V a S R V a U W b W X W ! R V \u00e0 W X W b W Y R c",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "E",
"sec_num": null
},
{
"text": "1. Apply a part-of-speech tagger on both questions: Tag(Q):",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "E",
"sec_num": null
},
{
"text": "R T S e d Y f h g p i q S # R V U r d Y f h g s i t U W X W X W ! R u d v f w g p i x",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "E",
"sec_num": null
},
{
"text": "Tag(Q'): The Lexical relation between a pair of content words is initially considered to be a string identity. In later loops starting at step 3 one of the following three possible relaxations of Lexical relation are allowed: (a) common morphological root (e.g. owner and owns, from question Q742: \"Who is the owner of CNN ?\" and question Q417: \"Who owns CNN ?\" respectively); (b) WordNet synonyms (e.g. gestation and pregnancy from question Q763: \"How long is human gestation ?\" and question Q765: \"A normal human pregnancy lasts how many months ?\", respectively) or (c) WordNet hypernyms (e.g. the verbs erect and build from question Q814: \"When was Berlin's Brandenburg gate erected ?\" and question Q397: \"When was the Brandenburg Gate in Berlin built ?\" respectively). Table 2 : NIST-evaluated performance NIST for the system on which we evaluated the role of lexico-semantic feedbacks. Table 3 lists the quantitative analysis of the feedback loops. Loop 1 was generated more often than any other loop. However, the small overall average number of feedback loops that have been carried out indicate that the fact they port little overhead to the Q&A system. More interesting is the qualitative analysis of the effect of the feedback loops on the Q&A evaluation. Overall, the precision increases substantially when all loops were enabled, as illustrated in Table 4 Individually, the effect of Loop 1 has an accuracy increase of over 40%, the effect of Loop 2 had an enhancement of more than 52% while Loop 3 produced an enhancement of only 8%. Table 4 lists also the combined effect of the feed-backs, showing that when all feedbacks are enabled, for short answers we obtained an MRAR of 0.568, i.e. 76% increase over Q&A without feedbacks. The MRAR for long answers had a similar increase of 91%. Because we also used the answer caching technique, we gained more than 1% for short answers and almost 3% for long answers, obtaining the result listed in Table 2 . 
In our experiments, from the total of 890 TREC questions, lexical alternations were used for 129 questions and the semantic alternations were needed only for 175 questions.",
"cite_spans": [],
"ref_spans": [
{
"start": 773,
"end": 780,
"text": "Table 2",
"ref_id": null
},
{
"start": 891,
"end": 898,
"text": "Table 3",
"ref_id": "TABREF10"
},
{
"start": 1360,
"end": 1367,
"text": "Table 4",
"ref_id": "TABREF12"
},
{
"start": 1956,
"end": 1963,
"text": "Table 2",
"ref_id": null
}
],
"eq_spans": [],
"section": "E",
"sec_num": null
},
{
"text": "R a S d Y f h g p i a S R a U d Y f h g s i a U W X W X W ! R c T d v f w g p i a c",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "E",
"sec_num": null
},
{
"text": "This paper has presented a Q&/A system that employs several feedback mechanisms that provide lexical and semantic alternations to the question keywords. By relying on large, open-domain linguistic resources such as WordNet we enabled a more precise approach of searching and mining answers from large collections of texts. Evaluations indicate that when all three feedback loops are enabled we reached an enhancement of almost 76% for short answers and 91% for long answers, respectively, over the case when there are no feedback loops. In addition, a small increase is produced by relying on cached answers of similar questions. Our results so far indicate that the usage of feedback loops that produce alternations is significantly more efficient than multiword indexing or annotations of large corpora with predicate-argument information.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Conclusion",
"sec_num": "7"
},
{
"text": "The Text REtrieval Conference (TREC) is a series of workshops organized by the National Institute of Standards and Technology (NIST), designed to advance the state-ofthe-art in information retrieval(IR)",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "",
"sec_num": null
},
{
"text": "Some modifiers might be missing from the answer.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "",
"sec_num": null
}
],
"back_matter": [
{
"text": "To evaluate the role of lexico-semantic feedback loops in an open-domain textual Q&A system we have relied on the 890 questions employed in the TREC-8 and TREC-9 Q&A evaluations. In TREC, for each question the performance was computed by the reciprocal value of the rank (RAR) of the highest-ranked correct answer given by the system. Given that only the first five answers were considered in the TREC evaluations, i f the RAR is defined asits value is 1 if the first answer is correct; 0.5 if the second answer was correct, but not the first one; 0.33 when the correct answer was on the third position; 0.25 if the fourth answer was correct; 0.2 when the fifth answer was correct and 0 if none of the first five answers were correct. The Mean Reciprocal Answer Rank (MRAR) is used to compute the overall performance of the systems participating in the TREC evaluationIn addition, TREC-9 imposed the constraint that an answer is considered correct only when the textual context from the document that contains it can account for it. When the human assessors were convinced this constraint was satisfied, they considered the RAR to be strict, otherwise, the RAR was considered lenient. ",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Performance evaluation",
"sec_num": "6"
}
],
"bib_entries": {
"BIBREF0": {
"ref_id": "b0",
"title": "Answer extraction",
"authors": [
{
"first": "Steve",
"middle": [],
"last": "Abney",
"suffix": ""
},
{
"first": "Michael",
"middle": [],
"last": "Collins",
"suffix": ""
},
{
"first": "Amit",
"middle": [],
"last": "Singhal",
"suffix": ""
}
],
"year": 2000,
"venue": "Proceedings of the 6th Applied Natural Language Processing Conference (ANLP-2000)",
"volume": "",
"issue": "",
"pages": "296--301",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Steve Abney, Michael Collins, and Amit Singhal. Answer extraction. In Proceedings of the 6th Applied Natural Language Processing Conference (ANLP-2000), pages 296-301, Seattle, Washington, 2000.",
"links": null
},
"BIBREF1": {
"ref_id": "b1",
"title": "INQUERY in TREC-9",
"authors": [
{
"first": "James",
"middle": [],
"last": "Allen",
"suffix": ""
},
{
"first": "Margaret",
"middle": [],
"last": "Connell",
"suffix": ""
},
{
"first": "W",
"middle": [
"Bruce"
],
"last": "Croft",
"suffix": ""
},
{
"first": "Fan-Fang",
"middle": [],
"last": "Feng",
"suffix": ""
},
{
"first": "David",
"middle": [],
"last": "Fisher",
"suffix": ""
},
{
"first": "Xioayan",
"middle": [],
"last": "Li",
"suffix": ""
}
],
"year": 2000,
"venue": "Proceedings of the Text Retrieval Conference (TREC-9",
"volume": "",
"issue": "",
"pages": "504--510",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "James Allen, Margaret Connell, W. Bruce Croft, Fan-Fang Feng, David Fisher and Xioayan Li. INQUERY in TREC-9. Proceedings of the Text Retrieval Conference (TREC-9), pages 504-510, 2000.",
"links": null
},
"BIBREF2": {
"ref_id": "b2",
"title": "Examining the role of statistical and linguistic knowledge sources in a general-knowledge que stion answering system",
"authors": [
{
"first": "Claire",
"middle": [],
"last": "Cardie",
"suffix": ""
},
{
"first": "Vincent",
"middle": [],
"last": "Ng",
"suffix": ""
},
{
"first": "David",
"middle": [],
"last": "Pierce",
"suffix": ""
},
{
"first": "Chris",
"middle": [],
"last": "Buckley",
"suffix": ""
}
],
"year": 2000,
"venue": "Proceedings of the 6th Applied Natural Language Processing Conference (ANLP-2000)",
"volume": "",
"issue": "",
"pages": "180--187",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Claire Cardie, Vincent Ng, David Pierce, Chris Buckley. Ex- amining the role of statistical and linguistic knowledge sources in a general-knowledge que stion answering sys- tem. In Proceedings of the 6th Applied Natural Lan- guage Processing Conference (ANLP-2000), pages 180- 187, Seattle, Washington, 2000.",
"links": null
},
"BIBREF3": {
"ref_id": "b3",
"title": "Question Answering by passage selection",
"authors": [
{
"first": "C",
"middle": [
"L"
],
"last": "Clarke",
"suffix": ""
},
{
"first": "Gordon",
"middle": [
"V"
],
"last": "Cormak",
"suffix": ""
},
{
"first": "D",
"middle": [
"I E"
],
"last": "Kisman",
"suffix": ""
},
{
"first": "T",
"middle": [
"R"
],
"last": "Lynam",
"suffix": ""
}
],
"year": 2000,
"venue": "Proceedings of the Text Retrieval Conference (TREC-9",
"volume": "",
"issue": "",
"pages": "65--76",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "C.L. Clarke, Gordon V. Cormak, D.I.E. Kisman and T.R. Ly- nam. Question Answering by passage selection. Pro- ceedings of the Text Retrieval Conference (TREC-9), pages 65-76, 2000.",
"links": null
},
"BIBREF4": {
"ref_id": "b4",
"title": "A New Statistical Parser Based on Bigram Lexical Dependencies",
"authors": [
{
"first": "Michael",
"middle": [],
"last": "Collins",
"suffix": ""
}
],
"year": 1996,
"venue": "Proceedings of the 34th Annual Meeting of the Association for Computational Linguistics, ACL-96",
"volume": "",
"issue": "",
"pages": "184--191",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Michael Collins. A New Statistical Parser Based on Bigram Lexical Dependencies. In Proceedings of the 34th An- nual Meeting of the Association for Computational Lin- guistics, ACL-96, pages 184-191, 1996.",
"links": null
},
"BIBREF5": {
"ref_id": "b5",
"title": "QALC-the question-answering system of LIMSI-CNRS",
"authors": [
{
"first": "Olivier",
"middle": [],
"last": "Ferret",
"suffix": ""
},
{
"first": "Brigitte",
"middle": [],
"last": "Grau",
"suffix": ""
},
{
"first": "Martine",
"middle": [],
"last": "Hurault-Plantet",
"suffix": ""
},
{
"first": "Gabriel",
"middle": [],
"last": "Illouz",
"suffix": ""
},
{
"first": "Christian",
"middle": [],
"last": "Jacquemin",
"suffix": ""
},
{
"first": "Nicolas",
"middle": [],
"last": "Masson",
"suffix": ""
},
{
"first": "Paule",
"middle": [],
"last": "Lecuyer",
"suffix": ""
}
],
"year": 2000,
"venue": "Proceedings of the Text Retrieval Conference (TREC-9",
"volume": "",
"issue": "",
"pages": "316--326",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Olivier Ferret, Brigitte Grau, Martine Hurault-Plantet, Gabriel Illouz, Christian Jacquemin, Nicolas Masson and Paule Lecuyer. QALC-the question-answering system of LIMSI-CNRS. Proceedings of the Text Retrieval Confer- ence (TREC-9), pages 316-326, 2000.",
"links": null
},
"BIBREF6": {
"ref_id": "b6",
"title": "A combined IR/NLP approach to question answering against large text collections",
"authors": [
{
"first": "Robert",
"middle": [],
"last": "Gaizauskas",
"suffix": ""
},
{
"first": "Kevin",
"middle": [],
"last": "Humphreys",
"suffix": ""
}
],
"year": null,
"venue": "Proceedings of the 6th",
"volume": "",
"issue": "",
"pages": "",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Robert Gaizauskas and Kevin Humphreys. A com- bined IR/NLP approach to question answering against large text collections. In Proceedings of the 6th",
"links": null
},
"BIBREF7": {
"ref_id": "b7",
"title": "Content-Based Multimedia Information Access Conference (RIAO-2000)",
"authors": [],
"year": 2000,
"venue": "",
"volume": "",
"issue": "",
"pages": "1288--1304",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Content-Based Multimedia Information Access Confer- ence (RIAO-2000), pages 1288-1304, Paris, France, 2000.",
"links": null
},
"BIBREF8": {
"ref_id": "b8",
"title": "Experiments with Open-Domain Textual Question Answering",
"authors": [
{
"first": "Sanda",
"middle": [],
"last": "Harabagiu",
"suffix": ""
},
{
"first": "Marius",
"middle": [],
"last": "Pa\u015fca",
"suffix": ""
},
{
"first": "Steven",
"middle": [],
"last": "Maiorano",
"suffix": ""
}
],
"year": 2000,
"venue": "the Proceedings of the 18th International Conference on Computational Linguistics (COLING-2000)",
"volume": "",
"issue": "",
"pages": "292--298",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Sanda Harabagiu, Marius Pa\u015fca and Steven Maiorano. Ex- periments with Open-Domain Textual Question Answer- ing. In the Proceedings of the 18th International Con- ference on Computational Linguistics (COLING-2000), pages 292-298, 2000.",
"links": null
},
"BIBREF9": {
"ref_id": "b9",
"title": "Decision tree parsing using a hidden derivational model",
"authors": [
{
"first": "Frederick",
"middle": [],
"last": "Jelinek",
"suffix": ""
},
{
"first": "John",
"middle": [],
"last": "Lafferty",
"suffix": ""
},
{
"first": "Dan",
"middle": [],
"last": "Magerman",
"suffix": ""
},
{
"first": "Robert",
"middle": [],
"last": "Mercer",
"suffix": ""
}
],
"year": 1994,
"venue": "Proceedings of the 1994 Human Language Technology Workshop",
"volume": "",
"issue": "",
"pages": "272--277",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Frederick Jelinek, John Lafferty, Dan Magerman, Robert Mercer, Adwait Ratnaparkhi and Selim Roukos. Deci- sion tree parsing using a hidden derivational model. In Proceedings of the 1994 Human Language Technology Workshop, pages 272-277, 1994.",
"links": null
},
"BIBREF10": {
"ref_id": "b10",
"title": "TREC-9 Cross Language, Web and Question-Answering Track Experiments using PIRCS",
"authors": [
{
"first": "K",
"middle": [
"L"
],
"last": "Kwok",
"suffix": ""
},
{
"first": "L",
"middle": [],
"last": "Grunfeld",
"suffix": ""
},
{
"first": "N",
"middle": [],
"last": "Dinstl",
"suffix": ""
},
{
"first": "M",
"middle": [],
"last": "Chan",
"suffix": ""
}
],
"year": 2000,
"venue": "Proceedings of the Text Retrieval Conference (TREC-9",
"volume": "",
"issue": "",
"pages": "26--35",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "K.L. Kwok, L. Grunfeld, N. Dinstl and M. Chan. TREC- 9 Cross Language, Web and Question-Answering Track Experiments using PIRCS. Proceedings of the Text Re- trieval Conference (TREC-9), pages 26-35, 2000.",
"links": null
},
"BIBREF11": {
"ref_id": "b11",
"title": "Statistical decision-tree models of parsing",
"authors": [
{
"first": "Dan",
"middle": [],
"last": "Magerman",
"suffix": ""
}
],
"year": 1995,
"venue": "Proceedings of the 33rd Annual Meeting of the Association for Computational Linguistics, ACL-95",
"volume": "",
"issue": "",
"pages": "276--283",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Dan Magerman. Statistical decision-tree models of parsing. In Proceedings of the 33rd Annual Meeting of the Associ- ation for Computational Linguistics, ACL-95, pages 276- 283, 1995.",
"links": null
},
"BIBREF12": {
"ref_id": "b12",
"title": "WordNet: A Lexical Database",
"authors": [
{
"first": "George",
"middle": [
"A"
],
"last": "Miller",
"suffix": ""
}
],
"year": 1995,
"venue": "November",
"volume": "38",
"issue": "",
"pages": "39--41",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "George A. Miller. WordNet: A Lexical Database. Commu- nication of the ACM, vol 38: No11, pages 39-41, Novem- ber 1995.",
"links": null
},
"BIBREF13": {
"ref_id": "b13",
"title": "The Structure and Performance of an Open-Domain Question Answering System",
"authors": [
{
"first": "Dan",
"middle": [],
"last": "Moldovan",
"suffix": ""
},
{
"first": "Sanda",
"middle": [],
"last": "Harabagiu",
"suffix": ""
},
{
"first": "Marius",
"middle": [],
"last": "Pa\u015fca",
"suffix": ""
},
{
"first": "Rada",
"middle": [],
"last": "Mihalcea",
"suffix": ""
},
{
"first": "Richard",
"middle": [],
"last": "Goodrum",
"suffix": ""
},
{
"first": "Roxana",
"middle": [],
"last": "G\u00eerju",
"suffix": ""
},
{
"first": "Vasile",
"middle": [],
"last": "Rus",
"suffix": ""
}
],
"year": 2000,
"venue": "Proceedings of the 38th Annual Meeting of the Association for Comoutational Linguistics (ACL-2000)",
"volume": "",
"issue": "",
"pages": "563--570",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Dan Moldovan, Sanda Harabagiu, Marius Pa\u015fca, Rada Mihalcea, Richard Goodrum, Roxana G\u00eerju and Vasile Rus. The Structure and Performance of an Open-Domain Question Answering System. Proceedings of the 38th Annual Meeting of the Association for Comoutational Linguistics (ACL-2000), pages 563-570, 2000.",
"links": null
},
"BIBREF14": {
"ref_id": "b14",
"title": "Ranking suspected answers to natural language questions using predictive annotation",
"authors": [
{
"first": "Dragomir",
"middle": [],
"last": "Radev",
"suffix": ""
},
{
"first": "John",
"middle": [],
"last": "Prager",
"suffix": ""
},
{
"first": "V",
"middle": [],
"last": "Samn",
"suffix": ""
}
],
"year": 2000,
"venue": "Proceedings of the 6th Applied Natural Language Processing Conference (ANLP-2000)",
"volume": "",
"issue": "",
"pages": "150--157",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Dragomir Radev, John Prager, and V. Samn. Ranking sus- pected answers to natural language questions using pre- dictive annotation. In Proceedings of the 6th Applied Natural Language Processing Conference (ANLP-2000), pages 150-157, Seattle, Washington, 2000.",
"links": null
},
"BIBREF15": {
"ref_id": "b15",
"title": "A question answering system supported by information extraction",
"authors": [
{
"first": "Rohini",
"middle": [],
"last": "Srihari",
"suffix": ""
},
{
"first": "W",
"middle": [],
"last": "Li",
"suffix": ""
}
],
"year": 2000,
"venue": "Proceedings of the 6th Applied Natural Language Processing Conference (ANLP-2000)",
"volume": "",
"issue": "",
"pages": "",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Rohini Srihari and W. Li. A question answering system supported by information extraction. In Proceedings of the 6th Applied Natural Language Processing Conference (ANLP-2000), Seattle, Washington, 2000.",
"links": null
},
"BIBREF16": {
"ref_id": "b16",
"title": "Building a question answering test collection",
"authors": [
{
"first": "Ellen",
"middle": [
"M"
],
"last": "Voorhees",
"suffix": ""
},
{
"first": "Dawn",
"middle": [],
"last": "Tice",
"suffix": ""
}
],
"year": 2000,
"venue": "Proceedings of the 23rd Annual International ACM SIGIR Conference on Research and Development in Information Retrieval (SIGIR-2000)",
"volume": "",
"issue": "",
"pages": "",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Ellen M. Voorhees and Dawn Tice. Building a question an- swering test collection. In Proceedings of the 23rd An- nual International ACM SIGIR Conference on Research and Development in Information Retrieval (SIGIR-2000), Athens, Greece, 2000.",
"links": null
}
},
"ref_entries": {
"FIGREF1": {
"num": null,
"type_str": "figure",
"uris": null,
"text": "Feedbacks for the Answer Search."
},
"FIGREF2": {
"num": null,
"type_str": "figure",
"uris": null,
"text": "QUERY(Q209):) paper AND clip AND (invented ORinventor OR invent OR invents)0 2. Lexical Alternations."
},
"TABREF0": {
"html": null,
"content": "<table><tr><td colspan=\"2\">\" \u00a5 # ! $</td><td>\u00a3</td><td/></tr><tr><td colspan=\"4\">Question</td><td>Yes</td><td>Cached Questions</td></tr><tr><td colspan=\"5\">REFORMULATION</td><td>Cached Answers</td></tr><tr><td/><td/><td/><td colspan=\"2\">No</td></tr><tr><td/><td/><td/><td>PARSE</td></tr><tr><td/><td/><td/><td colspan=\"2\">Expected Answer Type</td></tr><tr><td colspan=\"5\">Question Keywords</td></tr><tr><td>\u00a7</td><td colspan=\"2\">\u00a3</td><td>! \u00a5</td><td>Retrieval</td></tr><tr><td/><td/><td/><td colspan=\"2\">Index</td><td>LOOP 1</td></tr><tr><td/><td/><td/><td/><td>Min<Number Paragraphs<Max No</td></tr><tr><td/><td/><td/><td>Yes</td></tr><tr><td colspan=\"5\">Expected Answer Type</td></tr><tr><td colspan=\"4\">in paragraph Yes</td><td>No</td><td>LOOP 2 Filter out paragraph</td></tr><tr><td colspan=\"5\">Answer Semantic Form Question Semantic Form S-UNIFICATIONS</td><td>No</td><td>Lexical Alternations</td></tr><tr><td colspan=\"2\">Yes</td><td/><td/><td>LOOP 3</td></tr><tr><td colspan=\"5\">Question Logical Form</td><td>No</td><td>Semantic</td></tr><tr><td colspan=\"5\">ABDUCTIVE PROOF</td><td>Alternations</td></tr><tr><td colspan=\"5\">Answer Logical Form</td></tr><tr><td/><td/><td/><td colspan=\"2\">Yes</td></tr><tr><td>\u00a2 \u00a4 \u00a3 \u00a4 \u00a5 \u00a7 \u00a6</td><td>\u00a9</td><td/><td/></tr></table>",
"num": null,
"text": "details the empirical methods used in our system for transforming a natural language question into an IR query.",
"type_str": "table"
},
"TABREF2": {
"html": null,
"content": "<table><tr><td>Question</td><td colspan=\"2\">Q231: Who was the president of Vichy France ?</td></tr><tr><td/><td>PERSON president France</td><td>Vichy</td></tr><tr><td>Answer</td><td colspan=\"2\">\"Marshall Philippe Petain, head of Vichy France PERSON(Marshall Philippe Petain) Vichy head government France</td></tr><tr><td/><td>government\"</td></tr></table>",
"num": null,
"text": "Class L2-2: Either the question semantic form or the answer semantic form contain new con-cepts, that impose a bridging inference. The knowledge used for inference is of lexical nature and is later employed for abductions that justify the correctness of the answer. For example:",
"type_str": "table"
},
"TABREF3": {
"html": null,
"content": "<table><tr><td>Question</td><td colspan=\"4\">Q318: Where did Bill Gates go to college?</td></tr><tr><td/><td>ORGANIZATION</td><td>go</td><td>college</td><td>Bill Gates</td></tr><tr><td>Answer</td><td colspan=\"4\">Bill Gates \"Harvard dropout and Microsoft founder\" ORGANIZATION=college(Harvard) dropout founder Microsoft</td></tr></table>",
"num": null,
"text": "Class L3-1: is characterized by the need for contextual information, brought forward by reference resolution. In the following example, a chain of coreference links Bill Gates and Microsoft founder in the candidate answer:",
"type_str": "table"
},
"TABREF7": {
"html": null,
"content": "<table/>",
"num": null,
"text": "Two classes of TREC-9 question reformulations.",
"type_str": "table"
},
"TABREF10": {
"html": null,
"content": "<table/>",
"num": null,
"text": "Number of feedbacks on the TREC test data",
"type_str": "table"
},
"TABREF12": {
"html": null,
"content": "<table><tr><td>L1=Loop 1; L2=Loop 2; L3=Loop 3.</td></tr></table>",
"num": null,
"text": "Effect of feedbacks on accuracy.",
"type_str": "table"
}
}
}
} |