{
"paper_id": "U07-1010",
"header": {
"generated_with": "S2ORC 1.0.0",
"date_generated": "2023-01-19T03:08:59.005199Z"
},
"title": "Named Entity Recognition in Question Answering of Speech Data",
"authors": [
{
"first": "Diego",
"middle": [],
"last": "Moll\u00e1",
"suffix": "",
"affiliation": {
"laboratory": "",
"institution": "Macquarie University North Ryde",
"location": {
"country": "Australia"
}
},
"email": ""
},
{
"first": "Menno",
"middle": [],
"last": "Van Zaanen",
"suffix": "",
"affiliation": {
"laboratory": "",
"institution": "Macquarie University North Ryde",
"location": {
"country": "Australia"
}
},
"email": ""
},
{
"first": "Steve",
"middle": [],
"last": "Cassidy",
"suffix": "",
"affiliation": {
"laboratory": "",
"institution": "Macquarie University North Ryde",
"location": {
"country": "Australia"
}
},
"email": "cassidy@ics.mq.edu.au"
}
],
"year": "",
"venue": null,
"identifiers": {},
"abstract": "Question answering on speech transcripts (QAst) is a pilot track of the CLEF competition. In this paper we present our contribution to QAst, which is centred on a study of Named Entity (NE) recognition on speech transcripts, and how it impacts on the accuracy of the final question answering system. We have ported AFNER, the NE recogniser of the AnswerFinder questionanswering project, to the set of answer types expected in the QAst track. AFNER uses a combination of regular expressions, lists of names (gazetteers) and machine learning to find NEs in the data. The machine learning component was trained on a development set of the AMI corpus. In the process we identified various problems with scalability of the system and the existence of errors of the extracted annotation, which lead to relatively poor performance in general. Performance was yet comparable with state of the art, and the system was second (out of three participants) in one of the QAst subtasks.",
"pdf_parse": {
"paper_id": "U07-1010",
"_pdf_hash": "",
"abstract": [
{
"text": "Question answering on speech transcripts (QAst) is a pilot track of the CLEF competition. In this paper we present our contribution to QAst, which is centred on a study of Named Entity (NE) recognition on speech transcripts, and how it impacts on the accuracy of the final question answering system. We have ported AFNER, the NE recogniser of the AnswerFinder questionanswering project, to the set of answer types expected in the QAst track. AFNER uses a combination of regular expressions, lists of names (gazetteers) and machine learning to find NEs in the data. The machine learning component was trained on a development set of the AMI corpus. In the process we identified various problems with scalability of the system and the existence of errors of the extracted annotation, which lead to relatively poor performance in general. Performance was yet comparable with state of the art, and the system was second (out of three participants) in one of the QAst subtasks.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Abstract",
"sec_num": null
}
],
"body_text": [
{
"text": "AnswerFinder is a question answering system that focuses on shallow semantic representations of questions and text . The underlying idea of AnswerFinder is that the use of semantic representations reduces the impact of paraphrases (different wordings of the same information). The system uses symbolic algorithms to find exact answers to questions in large document collections.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Introduction",
"sec_num": "1"
},
{
"text": "The design and implementation of the An-swerFinder system has been driven by requirements that the system should be easy to configure, extend, and, therefore, port to new domains. To measure the success of the implementation of AnswerFinder in these respects, we decided to participate in the CLEF 2007 pilot task of question answering on speech transcripts (QAst). The task in this competition is different from that for which AnswerFinder was originally designed and provides a good test of portability to new domains.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Introduction",
"sec_num": "1"
},
{
"text": "The current CLEF pilot track QAst presents an interesting and challenging new application of question answering. The objective in QAst is to answer questions based on transcripts of meetings and lectures. Both automatic and manual transcripts are provided; the automatic transcripts being the result of applying a speech recogniser to the audio recordings. The data for the task is taken from corpora collected by the AMI (Augmented Multiparty Interaction) project (Carletta et al., 2005) and from the CHIL (Computers in the Human Interaction Loop) project (Waibel et al., 2004) . While both corpora are extensively annotated, only speaker turn annotation is provided in the input data for this task.",
"cite_spans": [
{
"start": 465,
"end": 488,
"text": "(Carletta et al., 2005)",
"ref_id": "BIBREF0"
},
{
"start": 557,
"end": 578,
"text": "(Waibel et al., 2004)",
"ref_id": "BIBREF13"
}
],
"ref_spans": [],
"eq_spans": [],
"section": "Introduction",
"sec_num": "1"
},
{
"text": "In our contribution we focus on adapting AFNER, our Named Entity Recogniser (NER), for speech transcripts and its application for Question Answering. Named Entity (NE) recognition is the task of finding instances of specific types of entities in free text. This module is typically one of the most important sources of possible answers available to QA systems and therefore an improvement on its accuracy should result on an improvement of the accuracy of the complete QA system.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Introduction",
"sec_num": "1"
},
{
"text": "The AFNER system, like the AnswerFinder system, was designed with flexibility in mind. Since the properties of the NE recognition task in this competition are in several respects quite different to those of the task AFNER was originally designed for (as discussed in section 3.3), the QAst competition also allows us to measure the success of our AFNER implementation according to the configurability and extensibility criteria.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Introduction",
"sec_num": "1"
},
{
"text": "The task of Text-Based Question Answering (QA) has been very active during the last decade, mostly thanks to the Question Answering track of the Text REtrieval Conference (TREC) (Voorhees, 1999) . The kinds of questions being asked range from factbased questions (also known as factoid questions)",
"cite_spans": [
{
"start": 178,
"end": 194,
"text": "(Voorhees, 1999)",
"ref_id": "BIBREF11"
}
],
"ref_spans": [],
"eq_spans": [],
"section": "Question Answering on Speech Transcripts",
"sec_num": "2"
},
{
"text": "to questions whose answer is a list of facts, or definitions. The methods and techniques used have converged to a prototypical, pipeline-based architecture like the one we will describe here, and only recently the task has been diversified to more complex tasks such as TREC's QA task of complex interactive question answering (Dang and Lin, 2007) or the Document Understanding Conference (DUC)'s track of query-driven summarisation (Dang, 2006) . Whereas the TREC competitions concentrate on searching in English texts, CLEF (Cross-Language Evaluation Forum) focuses on non-English, crosslingual and multi-lingual search. Within this forum several competitions are organised. The QAst track deals with question answering on speech data.",
"cite_spans": [
{
"start": 327,
"end": 347,
"text": "(Dang and Lin, 2007)",
"ref_id": "BIBREF2"
},
{
"start": 433,
"end": 445,
"text": "(Dang, 2006)",
"ref_id": "BIBREF3"
}
],
"ref_spans": [],
"eq_spans": [],
"section": "Question Answering on Speech Transcripts",
"sec_num": "2"
},
{
"text": "Prior to the QAst pilot track of CLEF there has been very little work on the area of question answering of speech data. Much of the work has focused on the task of recognising named entities by applying machine learning using features that leverage the very special kinds of information of speech data, particularly the lack of punctuation and capitalisation information. The work by Surdeanu et al. (2005) is an example of such an approach. Another line of work tries to recover the lost capitalisa-tion information by using machine learning methods trained on regular text and tested on text where all capitalisation information has been removed. This is the approach followed, for example, by Li et al. (2003) . Note, however, that Li et al. did not work on speech data as we are trying to do here but on regular text where case information has been removed. As we discuss below, speech data have many other factors that need to be taken into consideration.",
"cite_spans": [
{
"start": 384,
"end": 406,
"text": "Surdeanu et al. (2005)",
"ref_id": "BIBREF8"
},
{
"start": 696,
"end": 712,
"text": "Li et al. (2003)",
"ref_id": "BIBREF4"
}
],
"ref_spans": [],
"eq_spans": [],
"section": "Question Answering on Speech Transcripts",
"sec_num": "2"
},
{
"text": "Two data sets were provided by CLEF for development of systems participating in the evaluation. These were transcripts of lectures taken from the CHIL (Waibel et al., 2004) project and meetings from the AMI (Carletta et al., 2005) project. We made use of the AMI data because we had access to the original annotations which included named entities. This data consists of transcripts of 35 meetings each with up to four speakers. These contained around 254,000 words of dialogue. Due to disk space constraints we only made use of 15 meetings containing around 160,000 words in the development of our system.",
"cite_spans": [
{
"start": 151,
"end": 172,
"text": "(Waibel et al., 2004)",
"ref_id": "BIBREF13"
},
{
"start": 207,
"end": 230,
"text": "(Carletta et al., 2005)",
"ref_id": "BIBREF0"
}
],
"ref_spans": [],
"eq_spans": [],
"section": "Question Answering on Speech Transcripts",
"sec_num": "2"
},
{
"text": "The AnswerFinder question answering system is essentially a framework consisting of several phases that work in a sequential manner. For each of the phases, a specific algorithm has to be selected to create a particular instantiation of the framework. The aim of each of the phases is to reduce the amount of data the system has to handle from then on. This allows later phases to perform computationally more expensive operations on the remaining data. The first phase is a document retrieval phase that selects documents relevant to the question. AnswerFinder was developed to work on large document collections and this phase can make a significant reduction in the amount of text that must be handled in subsequent steps. Next is the sentence selection phase which selects a subset of sentences from the relevant documents selected in the previous phase. During sentence selection, all sentences that are still left (e.g. all sentences in the selected documents in the first step) are scored against the question using a relevance metric. The most relevant sentences according to this metric are kept for further processing. This phase can be applied to the remaining sentences several times using different metrics, each time reducing the number of sentences further.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "AnswerFinder",
"sec_num": "2.1"
},
{
"text": "After sentence selection, the remaining sentences are passed to the answer selection phase. The answer selection phase aims at selecting the best of the possible answers to return to the user. In the experiments described here, the list of possible answers is generated by applying a NER to the remaining sentences. 1 Next, the question is analysed, providing information about the kind of answer that is required. From the possible answers, those that match the type of answer required by the question are selected and scored. Finally, the best answer is returned to the user. Best answer in this context is considered to be the answer that has both the highest score and an answer type that matches the question, or simply the answer with the highest score if none of the possible answers fit the expected answer type.",
"cite_spans": [
{
"start": 316,
"end": 317,
"text": "1",
"ref_id": null
}
],
"ref_spans": [],
"eq_spans": [],
"section": "AnswerFinder",
"sec_num": "2.1"
},
{
"text": "Question answering on speech transcripts introduces specific challenges compared to text-based QA due to the nature of the genre and the process of transcription. AnswerFinder has been initially developed to work on news articles which are typically well-written pieces of text. The casual, multi-party spoken language used in this evaluation is very different:",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Applying AnswerFinder to Speech Transcripts",
"sec_num": "2.2"
},
{
"text": "\u2022 There are filling words that usually do not appear in free text (and in particular news text), such as \"er\", \"uh\", etc. In our experiments, this is particularly problematic when these words appear inside a named entity, e.g. \"Rufford, um, Sanatorium, that's right\".",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Applying AnswerFinder to Speech Transcripts",
"sec_num": "2.2"
},
{
"text": "\u2022 The grammatical structure of the transcription does not conform to that of free text. Consequently most tools, such as parsers and chunkers, which would normally be used in specific AnswerFinder phases, produce very poor results.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Applying AnswerFinder to Speech Transcripts",
"sec_num": "2.2"
},
{
"text": "\u2022 If the transcript is an automatic transcript (produced by a speech recogniser) there are errors of transcription and missing information, most notably punctuation characters and capitalised characters. This information is used in many phases of AnswerFinder when answering questions on news data.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Applying AnswerFinder to Speech Transcripts",
"sec_num": "2.2"
},
{
"text": "\u2022 During training, a corpus annotated with named entities is used. The density of named entities in free speech is much smaller than in usual corpora (containing news text).",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Applying AnswerFinder to Speech Transcripts",
"sec_num": "2.2"
},
{
"text": "Many of the above features make it difficult to do traditional linguistic processing such as parsing and semantic interpretation. For this reason, many of the instantiations of the phases we have implemented, which typically use complex linguistic processing (as described in van Zaanen et al. 2007) would not perform well. We consequently decided not to use some of AnswerFinder's more linguisticallyintensive modules. Instead we focused on attempting to increase the accuracy of the task of recognition of named entities. Thus, the question answering method used for QAst is entirely based on the task of finding and selecting the right entities.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Applying AnswerFinder to Speech Transcripts",
"sec_num": "2.2"
},
{
"text": "In particular, the instantiation of the An-swerFinder framework that generated the QAst 2007 results consists of the following algorithms for the phases:",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Applying AnswerFinder to Speech Transcripts",
"sec_num": "2.2"
},
{
"text": "\u2022 The document selection component returns the full list of documents provided for the complete list of questions. The total number of documents provided by the organisers of QAst is fairly small and therefore the other components of AnswerFinder are able to handle all documents. Essentially no documents are preselected in this instantiation. We do not attempt to rank the documents in any way.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Applying AnswerFinder to Speech Transcripts",
"sec_num": "2.2"
},
{
"text": "\u2022 As a pre-processing step, the named entity recogniser is run over all the documents. This allows for more efficient handling of the set of questions, as named entity recognition only has to occur once.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Applying AnswerFinder to Speech Transcripts",
"sec_num": "2.2"
},
{
"text": "\u2022 The sentence selection component is based on the word overlap between the question and the document sentences. This metric counts the number of words that can be found in both question and sentence after removing stop words. A simple sentence splitter method is used, which relies on the existence of punctuation marks when available, or on speech turns. Only sentences that contain NEs of the required type are considered.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Applying AnswerFinder to Speech Transcripts",
"sec_num": "2.2"
},
{
"text": "\u2022 Each of the named entities found in the selected sentences are scored. The score of a NE is the sum of the number of occurrences of that NE with a particular type.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Applying AnswerFinder to Speech Transcripts",
"sec_num": "2.2"
},
{
"text": "\u2022 The question classification component is based on a decision list of hand-constructed patterns of regular expressions. Each regular expression determines a question type and consequently a set of NE types.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Applying AnswerFinder to Speech Transcripts",
"sec_num": "2.2"
},
{
"text": "\u2022 The answer extraction component selects five NEs that are of the expected answer type and have the highest NE scores. QAst allows for the system to return up to five answers. If four or fewer NEs of the correct type are found, then a NIL answer (meaning no answer) is returned as an option after presenting all found NEs. If no NEs of the expected type are found at all, the returned answer is NIL.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Applying AnswerFinder to Speech Transcripts",
"sec_num": "2.2"
},
{
"text": "Within the AnswerFinder project, we recently incorporated a purpose-built NER, called AFNER . This NER has been specifically designed for the task of QA. AFNER differs from other NERs in that it aims to increase recall of recognition of entities, at the expense of a possible loss of precision . Crucially, it allows the allocation of multiple tags to the same string, thus handling the case of ambiguous entities or difficult entities by not committing to a single tag. The rationale is that we do not want to remove the right answer at this stage. Instead we let the final answer extraction and scoring mechanism make the final decision about what is a good answer. AFNER is ultimately based on machine learning. We use a maximum entropy classifier, and the implementation of this classifier is adapted from Franz Josef Och's YASMET 2 . Obviously, the selection of the features used in the classifier is very important.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "AFNER",
"sec_num": "3"
},
{
"text": "The features used by AFNER combine three kinds of information: regular expressions, gazetteers, and properties internal and external to the token. These features are described in more detail elsewhere and we will only briefly present them here.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Features",
"sec_num": "3.1"
},
{
"text": "The regular expressions used in AFNER are manually created and are useful for identifying strings that match patterns that are characteristic to entity types such as dates, times, percentages, and monetary expressions. These types of named entities are relatively standardised and therefore easy to find with high precision. However, the range of entities that can be discovered using regular expressions is limited. Matching a particular regular expression is a key feature used in identifying entities of these particular types.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Features",
"sec_num": "3.1"
},
{
"text": "Gazetteers are useful for finding commonly referenced entities such as names. AFNER uses three lists (locations, person names, and organisations), with a total of about 55,000 entries. The occurrence of tokens in one of the gazetteers is incorporated in the machine learning component. This allows for,",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Features",
"sec_num": "3.1"
},
{
"text": "The token is a member of a gazetteer InitCaps",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Regular Expressions Specific patterns for dates, times, etc FoundInList",
"sec_num": null
},
{
"text": "The first letter is a capital letter AllCaps",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Regular Expressions Specific patterns for dates, times, etc FoundInList",
"sec_num": null
},
{
"text": "The entire word is capitalised MixedCaps",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Regular Expressions Specific patterns for dates, times, etc FoundInList",
"sec_num": null
},
{
"text": "The word contains upper case and lower case letters IsSentEnd",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Regular Expressions Specific patterns for dates, times, etc FoundInList",
"sec_num": null
},
{
"text": "The token is an end of sentence character InitCapPeriod Starts with capital letter and ends with period OneCap",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Regular Expressions Specific patterns for dates, times, etc FoundInList",
"sec_num": null
},
{
"text": "The word is a single capitalised letter ContainDigit",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Regular Expressions Specific patterns for dates, times, etc FoundInList",
"sec_num": null
},
{
"text": "The word contains a digit NumberString",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Regular Expressions Specific patterns for dates, times, etc FoundInList",
"sec_num": null
},
{
"text": "The word is a number word ('one', 'thousand', etc.) PrepPreceded",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Regular Expressions Specific patterns for dates, times, etc FoundInList",
"sec_num": null
},
{
"text": "The word is preceded by a preposition (in a window of 4 tokens) PrevClass",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Regular Expressions Specific patterns for dates, times, etc FoundInList",
"sec_num": null
},
{
"text": "The class assigned to the previous token ProbClass",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Regular Expressions Specific patterns for dates, times, etc FoundInList",
"sec_num": null
},
{
"text": "The probability assigned to a particular class in the previous token AlwaysCapped",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Regular Expressions Specific patterns for dates, times, etc FoundInList",
"sec_num": null
},
{
"text": "The token is capitalised every time it appears Table 1 : A selection of features used in AFNER for example, context information in the final decision of the tag assignment for that particular token. Finally, there are three types of features that relate to specific aspects of the separate tokens. The first type focuses on internal evidence and highlights token properties including capitalisation, alpha/numeric information, etc. Some specific features are listed in Table 1 .",
"cite_spans": [],
"ref_spans": [
{
"start": 47,
"end": 54,
"text": "Table 1",
"ref_id": null
},
{
"start": 469,
"end": 476,
"text": "Table 1",
"ref_id": null
}
],
"eq_spans": [],
"section": "Regular Expressions Specific patterns for dates, times, etc FoundInList",
"sec_num": null
},
{
"text": "The second type of features focuses on external evidence that relates a token to tokens in surrounding text. Features that indicate which class has been assigned to the previous tokens and all of its class probabilities are also part of this type of feature.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Regular Expressions Specific patterns for dates, times, etc FoundInList",
"sec_num": null
},
{
"text": "The last type of features focuses on global evidence related to all occurrences of the same token. These features are mainly inspired on features described by Chieu and Ng (2002) . Currently AFNER only checks whether a token is always capitalised in a passage of text.",
"cite_spans": [
{
"start": 159,
"end": 178,
"text": "Chieu and Ng (2002)",
"ref_id": "BIBREF1"
}
],
"ref_spans": [],
"eq_spans": [],
"section": "Regular Expressions Specific patterns for dates, times, etc FoundInList",
"sec_num": null
},
{
"text": "The features described in the previous section are used in a maximum entropy classifier which for each token and for each category computes the probability of the token belonging to the category. Categories in this case are the named entity types prepended with 'B' and 'I' (indicating whether the token is at the beginning or inside a NE respectively), and a general 'OUT' category for tokens not in any entity. So for n named entities, n * 2 + 1 categories are used.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "General Method",
"sec_num": "3.2"
},
{
"text": "The classifier returns a list of tags for each token ordered based on probability. We select only those tags that have a probability of more than half of the probability of the next tag in the list. This initial threshold already removes tags that have a low probability. However, we also only allow a certain maximum number of tags to pass through. Preliminary experiments revealed that often the top two or three tag probabilities have similar values, but that tags lower down the list still pass the initial threshold, while they are not correct. By setting a threshold that limits the maximum number of tags to be returned we also filter those out. The results presented in this paper are generated by setting the second threshold to allow two tags per token. Initial experiments showed that this increases recall considerably. Allowing more tags increases recall only slightly while decreasing precision considerably.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "General Method",
"sec_num": "3.2"
},
{
"text": "Once tokens are assigned tags, they are combined to produce the final list of multi-word NEs as described elsewhere . The result is an assignment of named entities to the sequence of tags where the named entities may be nested. This way we aim at high recall by allowing multiple interpretations of problematic strings that could be ambiguous. Table 3 : Entity types used in the original version of AFNER",
"cite_spans": [],
"ref_spans": [
{
"start": 344,
"end": 351,
"text": "Table 3",
"ref_id": null
}
],
"eq_spans": [],
"section": "General Method",
"sec_num": "3.2"
},
{
"text": "AFNER has been developed to work on news data, and as such, we had to modify parts of the system to allow it to be used in the QAst task. The first adaptation of AFNER is the selection of NE types. Originally AFNER focused on a limited set of entities similar to those defined in the Message Understanding Conferences (Sundheim, 1995) , and listed in Table 3 . For QAst we used a set of entity types that closely resembles the kinds of answers expected, as described by the QAst 2007 specification. The types used by the modified AFNER are listed in Table 2 .",
"cite_spans": [
{
"start": 318,
"end": 334,
"text": "(Sundheim, 1995)",
"ref_id": "BIBREF7"
}
],
"ref_spans": [
{
"start": 351,
"end": 358,
"text": "Table 3",
"ref_id": null
},
{
"start": 550,
"end": 557,
"text": "Table 2",
"ref_id": "TABREF1"
}
],
"eq_spans": [],
"section": "Adaptation of AFNER to QAst",
"sec_num": "3.3"
},
{
"text": "The regular expressions that are used in AFNER to find MUC-type named entities were extended to cover the new types of entities. This process did not require much additional work, other than adding a few common names of shapes and colours. The lists of names that were part of the initial AFNER were left untouched.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Adaptation of AFNER to QAst",
"sec_num": "3.3"
},
{
"text": "The general machine learning mechanism was left unmodified, and the set of features was also left untouched. The only difference was the choice of training corpus. We mapped the annotated entities of the BBN corpus that we had used previously, and added a fragment of the development set of the AMI corpus.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Adaptation of AFNER to QAst",
"sec_num": "3.3"
},
{
"text": "However, due to problems of scalability during training (the intermediate files produced were very large due to the increased number of classifier categories) we were not able to use all the files. For these experiments we used 26 documents from the AMI corpus and 16 from the BBN corpus. Table 2 shows the total number of entities annotated in the BBN and the AMI parts of the training set. The entity types of each kind of corpus complement each other, though some of the entity types had few instances in the corpora, most notably, the type Language only occurred nine times.",
"cite_spans": [],
"ref_spans": [
{
"start": 289,
"end": 296,
"text": "Table 2",
"ref_id": "TABREF1"
}
],
"eq_spans": [],
"section": "Adaptation of AFNER to QAst",
"sec_num": "3.3"
},
{
"text": "We decided to use the BBN corpus to complement the annotations of AMI because some entity types that were very scarce in AMI were very common in BBN. Also, the entity types annotated in AMI are not the sort of types that would typically be annotated as named entities. For example, the entity type \"Person\" would have instances like industrial designer. Furthermore, the quality of some of the annotations of the AMI corpus was poor. In at least two of the 26 meetings the contents of named entities seemed to be random strings. After submitting the results, we found a bug in our corpus processing script which resulted in some named entities having extra words included in them.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Adaptation of AFNER to QAst",
"sec_num": "3.3"
},
{
"text": "We participated in all the QAst tasks, which are described below: CHIL M Manual transcripts from the CHIL corpus of lectures;",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Results",
"sec_num": "4"
},
{
"text": "CHIL A Automated transcripts from the CHIL corpus;",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Results",
"sec_num": "4"
},
{
"text": "AMI M Manual transcripts from the AMI corpus of meetings; and AMI A Automated transcripts from the AMI corpus.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Results",
"sec_num": "4"
},
{
"text": "We provided two runs per task. We were interested in determining the impact of the machine learning component of AFNER. Given the reduced number of training documents and the existence of errors in some of them we expected that the machine learning component would not be useful. Thus, the first run used the full AFNER system, whereas the second run (named \"noML\") used a version of AFNER that had the machine learning component disabled (essentially only using the regular expressions and the gazetteers). The results are shown in Table 4 .",
"cite_spans": [],
"ref_spans": [
{
"start": 533,
"end": 540,
"text": "Table 4",
"ref_id": null
}
],
"eq_spans": [],
"section": "Results",
"sec_num": "4"
},
{
"text": "The results returned by CLEF indicate, as expected, comparatively poor performance with respect to the other participants. We are pleased to notice, however, that the results of task CHIL M are second best (from a group of three participants). Task CHIL M is the task that used the AMI transcripts and it was the task that we used to develop and fine-tune the system. The other tasks simply used the same settings. We are particularly pleased to learn that the results of task CHIL M are higher than the results we obtained during development time. This is possibly due to the nature of our tuning experiments, since we automatically applied the answer patterns to the answers found, and it could have been the case that correct answers which happened not to match the patterns were automatically marked as incorrect in our experiments. The evaluations carried out by CLEF used human judges so they would be able to detect correct answers that had an unusual format.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Results",
"sec_num": "4"
},
{
"text": "The results indicate that none of the differences in results between the full and the noML runs are statistically significant under the paired t-test. This confirms our suspicion that the machine learning component of AFNER was not helping the question answering process at all. The likely reason for this is, as described above, the small size of the training data and the existence of noise in the NE annotations of the AMI corpus.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Results",
"sec_num": "4"
},
{
"text": "Our method to handle NIL questions is simple yet relatively effective to the point that correct NIL answers were an important part of the correct answers. Task AMI A in particular, which has 15 NIL questions, results in a halved MRR (from 14.10% down to 7.05% in our noML run) when all NIL questions are removed. It is encouraging to observe that, even after removing all NIL questions, task CHIL M has relatively good results (from 26.39% down to 22.38% in our noML run). The results of the non-NIL questions are shown in Table 5 .",
"cite_spans": [],
"ref_spans": [
{
"start": 523,
"end": 530,
"text": "Table 5",
"ref_id": null
}
],
"eq_spans": [],
"section": "Results",
"sec_num": "4"
},
{
"text": "In our contribution to the QAst competition we reused as much as we could of AnswerFinder, our question answering system, and AFNER, our Named Entity recogniser. Due to the nature of the speech corpus we needed to simplify the processing done by AnswerFinder and to make it rely more heavily on the entities found by AFNER. The whole experiment showed successfully that both AnswerFinder and AFNER are flexible and can be adapted easily to new tasks.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Conclusions and Further Work",
"sec_num": "5"
},
{
"text": "The small training corpus and the presence of annotation errors in the AMI corpus made the machine learning component of AFNER ineffective. An immediate line of further research is to investigate the cause of the errors, and correct them. Other lines of research are:",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Conclusions and Further Work",
"sec_num": "5"
},
{
"text": "\u2022 Revise the machine learning component of AFNER, possibly replace it with another more scalable method, so that larger training corpora can be used. Currently we are investigating more efficient ways of storing the intermediate data.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Conclusions and Further Work",
"sec_num": "5"
},
{
"text": "In general, some sentence selection methods have the ability to generate possible answers that can also be selected during the answer selection phase. However, these algorithms are not used in these experiments as will be discussed in section 2.2.ferent. For example,\u2022 There are frequent false starts and sentences that are interrupted in the discourse.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "",
"sec_num": null
},
{
"text": "http://www.fjoch.com/YASMET.html",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "",
"sec_num": null
}
],
"back_matter": [
{
"text": "\u2022 Review the features used for identifying the entities. Most of the current features rely on information about capitalisation, presence of digits, or punctuation marks but none of those are available on speech transcripts. In practice, using features that always provide the same values means that the machine learning component does not add much to the non-machine learning information, as shown in the experiment. More useful features will increase the use of the machine learning component.\u2022 Use additional corpora. There are a few corpora of speech transcriptions available with annotations of named entities that we could use. Among the options is the corpus of speech transcripts within the SQUAD project with the UK Data Archive at the University of Edinburgh.To conclude, question answering on speech transcripts is a challenging task that deserves greater attention by the research community. The CLEF QAst track is a step toward facilitating research on this area. Our participation in QAst is a step from our side to contribute to this exciting research area.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Run",
"sec_num": null
}
],
"bib_entries": {
"BIBREF0": {
"ref_id": "b0",
"title": "The ami meetings corpus",
"authors": [
{
"first": "Jean",
"middle": [],
"last": "Carletta",
"suffix": ""
},
{
"first": "Simone",
"middle": [],
"last": "Ashby",
"suffix": ""
},
{
"first": "Sebastien",
"middle": [],
"last": "Bourban",
"suffix": ""
},
{
"first": "Mike",
"middle": [],
"last": "Flynn",
"suffix": ""
},
{
"first": "Mael",
"middle": [],
"last": "Guillemot",
"suffix": ""
},
{
"first": "Thomas",
"middle": [],
"last": "Hain",
"suffix": ""
},
{
"first": "Jaroslav",
"middle": [],
"last": "Kadlec",
"suffix": ""
},
{
"first": "Vasilis",
"middle": [],
"last": "Karaiskos",
"suffix": ""
},
{
"first": "Wessel",
"middle": [],
"last": "Kraaij",
"suffix": ""
},
{
"first": "Melissa",
"middle": [],
"last": "Kronenthal",
"suffix": ""
},
{
"first": "Guillaume",
"middle": [],
"last": "Lathoud",
"suffix": ""
},
{
"first": "Mike",
"middle": [],
"last": "Lincoln",
"suffix": ""
},
{
"first": "Agnes",
"middle": [],
"last": "Lisowska",
"suffix": ""
},
{
"first": "Iain",
"middle": [
"A"
],
"last": "Mccowan",
"suffix": ""
},
{
"first": "Wilfried",
"middle": [],
"last": "Post",
"suffix": ""
},
{
"first": "Dennis",
"middle": [],
"last": "Reidsma",
"suffix": ""
},
{
"first": "Pierre",
"middle": [],
"last": "Wellner",
"suffix": ""
}
],
"year": 2005,
"venue": "Proceedings of the Measuring Behavior 2005 symposium on \"Annotating and measuring Meeting Behavior",
"volume": "",
"issue": "",
"pages": "",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Jean Carletta, Simone Ashby, Sebastien Bourban, Mike Flynn, Mael Guillemot, Thomas Hain, Jaroslav Kadlec, Vasilis Karaiskos, Wessel Kraaij, Melissa Kronenthal, Guillaume Lathoud, Mike Lincoln, Agnes Lisowska, Iain A. McCowan, Wilfried Post, Dennis Reidsma, and Pierre Wellner. 2005. The ami meetings corpus. In L. P. J. J. Noldus, F. Grieco, L. W. S. Loi- jens, and Patrick H. Zimmerman, editors, Proceedings of the Measuring Behavior 2005 symposium on \"An- notating and measuring Meeting Behavior\". AMI- 108.",
"links": null
},
"BIBREF1": {
"ref_id": "b1",
"title": "Named entity recognition: A maximum entropy approach using global information",
"authors": [
{
"first": "Haoi",
"middle": [],
"last": "Leong Chieu",
"suffix": ""
},
{
"first": "Hwee Tou",
"middle": [],
"last": "Ng",
"suffix": ""
}
],
"year": 2002,
"venue": "Proceedings COLING",
"volume": "",
"issue": "",
"pages": "",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Haoi Leong Chieu and Hwee Tou Ng. 2002. Named en- tity recognition: A maximum entropy approach using global information. In Proceedings COLING 2002.",
"links": null
},
"BIBREF2": {
"ref_id": "b2",
"title": "Different structures for evaluating answers to complex questions: Pyramids won't topple, and neither will human assessors",
"authors": [
{
"first": "Hoa",
"middle": [],
"last": "Dang",
"suffix": ""
},
{
"first": "Jimmy",
"middle": [],
"last": "Lin",
"suffix": ""
}
],
"year": 2007,
"venue": "Proceedings ACL",
"volume": "",
"issue": "",
"pages": "",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Hoa Dang and Jimmy Lin. 2007. Different structures for evaluating answers to complex questions: Pyra- mids won't topple, and neither will human assessors. In Proceedings ACL.",
"links": null
},
"BIBREF3": {
"ref_id": "b3",
"title": "Duc 2005: Evaluation of question-focused summarization systems",
"authors": [
{
"first": "Hoa",
"middle": [
"Tran"
],
"last": "Dang",
"suffix": ""
}
],
"year": 2006,
"venue": "Proceedings of the Workshop on Task-Focused Summarization and Question Answering",
"volume": "",
"issue": "",
"pages": "48--55",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Hoa Tran Dang. 2006. Duc 2005: Evaluation of question-focused summarization systems. In Proceed- ings of the Workshop on Task-Focused Summarization and Question Answering, pages 48-55, Sydney. Asso- ciation for Computational Linguistics.",
"links": null
},
"BIBREF4": {
"ref_id": "b4",
"title": "Question answering on a case insensitive corpus",
"authors": [
{
"first": "Wei",
"middle": [],
"last": "Li",
"suffix": ""
},
{
"first": "Rohini",
"middle": [],
"last": "Srihari",
"suffix": ""
},
{
"first": "Cheng",
"middle": [],
"last": "Niu",
"suffix": ""
},
{
"first": "Xiaoge",
"middle": [],
"last": "Li",
"suffix": ""
}
],
"year": 2003,
"venue": "Proc. ACL 2003 Workshop on Multilingual Summarization and Question Answering",
"volume": "",
"issue": "",
"pages": "84--93",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Wei Li, Rohini Srihari, Cheng Niu, and Xiaoge Li. 2003. Question answering on a case insensitive corpus. In Proc. ACL 2003 Workshop on Multilingual Summa- rization and Question Answering, pages 84-93.",
"links": null
},
"BIBREF5": {
"ref_id": "b5",
"title": "Answerfinder at TREC",
"authors": [
{
"first": "Diego",
"middle": [],
"last": "Moll\u00e1",
"suffix": ""
},
{
"first": "",
"middle": [],
"last": "Menno Van Zaanen",
"suffix": ""
}
],
"year": 2005,
"venue": "Proc. TREC 2005. NIST",
"volume": "",
"issue": "",
"pages": "",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Diego Moll\u00e1 and Menno van Zaanen. 2006. An- swerfinder at TREC 2005. In Ellen M. Voorhees and Lori P. Buckland, editors, Proc. TREC 2005. NIST.",
"links": null
},
"BIBREF6": {
"ref_id": "b6",
"title": "Named entity recognition for question answering",
"authors": [
{
"first": "Diego",
"middle": [],
"last": "Moll\u00e1",
"suffix": ""
},
{
"first": "",
"middle": [],
"last": "Menno Van Zaanen",
"suffix": ""
},
{
"first": "A",
"middle": [
"S"
],
"last": "Luiz",
"suffix": ""
},
{
"first": "",
"middle": [],
"last": "Pizzato",
"suffix": ""
}
],
"year": 2006,
"venue": "Proceedings ALTW 2006",
"volume": "",
"issue": "",
"pages": "",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Diego Moll\u00e1, Menno van Zaanen, and Luiz A.S. Pizzato. 2006. Named entity recognition for question answer- ing. In Proceedings ALTW 2006, page 8 pages.",
"links": null
},
"BIBREF7": {
"ref_id": "b7",
"title": "Overview of results of the MUC-6 evaluation",
"authors": [
{
"first": "M",
"middle": [],
"last": "Beth",
"suffix": ""
},
{
"first": "",
"middle": [],
"last": "Sundheim",
"suffix": ""
}
],
"year": 1995,
"venue": "Proc. Sixth Message Understanding Conference MUC-6",
"volume": "",
"issue": "",
"pages": "",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Beth M. Sundheim. 1995. Overview of results of the MUC-6 evaluation. In Proc. Sixth Message Under- standing Conference MUC-6. Morgan Kaufmann Pub- lishers, Inc.",
"links": null
},
"BIBREF8": {
"ref_id": "b8",
"title": "Named entity recognition from spontaneous opendomain speech",
"authors": [
{
"first": "Mihai",
"middle": [],
"last": "Surdeanu",
"suffix": ""
},
{
"first": "Jordi",
"middle": [],
"last": "Turmo",
"suffix": ""
},
{
"first": "Eli",
"middle": [],
"last": "Comelles",
"suffix": ""
}
],
"year": 2005,
"venue": "Proceedings Interspeech-05",
"volume": "",
"issue": "",
"pages": "",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Mihai Surdeanu, Jordi Turmo, and Eli Comelles. 2005. Named entity recognition from spontaneous open- domain speech. In Proceedings Interspeech-05, Lis- bon.",
"links": null
},
"BIBREF9": {
"ref_id": "b9",
"title": "A named entity recogniser for question answering",
"authors": [
{
"first": "Diego",
"middle": [],
"last": "Menno Van Zaanen",
"suffix": ""
},
{
"first": "",
"middle": [],
"last": "Moll\u00e1",
"suffix": ""
}
],
"year": 2007,
"venue": "Proceedings PACLING",
"volume": "",
"issue": "",
"pages": "",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Menno van Zaanen and Diego Moll\u00e1. 2007. A named en- tity recogniser for question answering. In Proceedings PACLING 2007.",
"links": null
},
"BIBREF10": {
"ref_id": "b10",
"title": "Answerfinder at trec",
"authors": [
{
"first": "Diego",
"middle": [],
"last": "Menno Van Zaanen",
"suffix": ""
},
{
"first": "Luiz",
"middle": [],
"last": "Moll\u00e1",
"suffix": ""
},
{
"first": "",
"middle": [],
"last": "Pizzato",
"suffix": ""
}
],
"year": 2006,
"venue": "Proceedings TREC 2006",
"volume": "",
"issue": "",
"pages": "",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Menno van Zaanen, Diego Moll\u00e1, and Luiz Pizzato. 2007. Answerfinder at trec 2006. In Ellen M. Voorhees and Lori P. Buckland, editors, Proceedings TREC 2006, page 8 pages.",
"links": null
},
"BIBREF11": {
"ref_id": "b11",
"title": "The TREC-8 question answering track report",
"authors": [
{
"first": "Ellen",
"middle": [
"M"
],
"last": "Voorhees",
"suffix": ""
}
],
"year": 1999,
"venue": "",
"volume": "",
"issue": "",
"pages": "",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Ellen M. Voorhees. 1999. The TREC-8 question answer- ing track report. In Ellen M. Voorhees and Donna K.",
"links": null
},
"BIBREF12": {
"ref_id": "b12",
"title": "Proc. TREC-8, number 500-246 in NIST Special Publication. NIST",
"authors": [
{
"first": "",
"middle": [],
"last": "Harman",
"suffix": ""
}
],
"year": null,
"venue": "",
"volume": "",
"issue": "",
"pages": "",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Harman, editors, Proc. TREC-8, number 500-246 in NIST Special Publication. NIST.",
"links": null
},
"BIBREF13": {
"ref_id": "b13",
"title": "Chil -computers in the human interaction loop",
"authors": [
{
"first": "A",
"middle": [],
"last": "Waibel",
"suffix": ""
},
{
"first": "H",
"middle": [],
"last": "Steusloff",
"suffix": ""
},
{
"first": "R",
"middle": [],
"last": "Stiefelhagen",
"suffix": ""
}
],
"year": 2004,
"venue": "5th International Workshop on Image Analysis for Multimedia Interactive Services",
"volume": "",
"issue": "",
"pages": "",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "A. Waibel, H. Steusloff, and R. Stiefelhagen. 2004. Chil -computers in the human interaction loop. In 5th In- ternational Workshop on Image Analysis for Multime- dia Interactive Services, Lisbon, Portugal.",
"links": null
}
},
"ref_entries": {
"FIGREF0": {
"text": "AnswerFinder system overview Figure 1 provides an overview of the An-swerFinder framework.",
"num": null,
"uris": null,
"type_str": "figure"
},
"TABREF1": {
"text": "Named Entities used for QAst. The numbers of entities listed in the two last columns refer to the actual training set (a subset of BBN and AMI).",
"content": "<table><tr><td>Class</td><td>Type</td></tr><tr><td colspan=\"2\">ENAMEX Organization</td></tr><tr><td/><td>Person</td></tr><tr><td/><td>Location</td></tr><tr><td>TIMEX</td><td>Date</td></tr><tr><td/><td>Time</td></tr><tr><td>NUMEX</td><td>Money</td></tr><tr><td/><td>Percent</td></tr></table>",
"html": null,
"num": null,
"type_str": "table"
}
}
}
} |