{
"paper_id": "S13-1004",
"header": {
"generated_with": "S2ORC 1.0.0",
"date_generated": "2023-01-19T15:41:35.007830Z"
},
"title": "*SEM 2013 shared task: Semantic Textual Similarity",
"authors": [
{
"first": "Eneko",
"middle": [],
"last": "Agirre",
"suffix": "",
"affiliation": {
"laboratory": "",
"institution": "University of the Basque Country",
"location": {}
},
"email": "e.agirre@ehu.es"
},
{
"first": "Daniel",
"middle": [],
"last": "Cer",
"suffix": "",
"affiliation": {
"laboratory": "",
"institution": "Stanford University",
"location": {}
},
"email": "danielcer@stanford.edu"
},
{
"first": "Mona",
"middle": [],
"last": "Diab",
"suffix": "",
"affiliation": {
"laboratory": "",
"institution": "Washington University",
"location": {
"settlement": "George"
}
},
"email": "mtdiab@gwu.edu"
},
{
"first": "Aitor",
"middle": [],
"last": "Gonzalez-Agirre",
"suffix": "",
"affiliation": {
"laboratory": "",
"institution": "University of the Basque Country",
"location": {}
},
"email": ""
},
{
"first": "Weiwei",
"middle": [],
"last": "Guo",
"suffix": "",
"affiliation": {
"laboratory": "",
"institution": "Columbia University",
"location": {}
},
"email": "weiwei@cs.columbia.edu"
}
],
"year": "",
"venue": null,
"identifiers": {},
"abstract": "In Semantic Textual Similarity (STS), systems rate the degree of semantic equivalence, on a graded scale from 0 to 5, with 5 being the most similar. This year we set up two tasks: (i) a core task (CORE), and (ii) a typed-similarity task (TYPED). CORE is similar in set up to SemEval STS 2012 task with pairs of sentences from sources related to those of 2012, yet different in genre from the 2012 set, namely, this year we included newswire headlines, machine translation evaluation datasets and multiple lexical resource glossed sets. TYPED, on the other hand, is novel and tries to characterize why two items are deemed similar, using cultural heritage items which are described with metadata such as title, author or description. Several types of similarity have been defined, including similar author, similar time period or similar location. The annotation for both tasks leverages crowdsourcing, with relative high interannotator correlation, ranging from 62% to 87%. The CORE task attracted 34 participants with 89 runs, and the TYPED task attracted 6 teams with 14 runs.",
"pdf_parse": {
"paper_id": "S13-1004",
"_pdf_hash": "",
"abstract": [
{
"text": "In Semantic Textual Similarity (STS), systems rate the degree of semantic equivalence, on a graded scale from 0 to 5, with 5 being the most similar. This year we set up two tasks: (i) a core task (CORE), and (ii) a typed-similarity task (TYPED). CORE is similar in set up to SemEval STS 2012 task with pairs of sentences from sources related to those of 2012, yet different in genre from the 2012 set, namely, this year we included newswire headlines, machine translation evaluation datasets and multiple lexical resource glossed sets. TYPED, on the other hand, is novel and tries to characterize why two items are deemed similar, using cultural heritage items which are described with metadata such as title, author or description. Several types of similarity have been defined, including similar author, similar time period or similar location. The annotation for both tasks leverages crowdsourcing, with relative high interannotator correlation, ranging from 62% to 87%. The CORE task attracted 34 participants with 89 runs, and the TYPED task attracted 6 teams with 14 runs.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Abstract",
"sec_num": null
}
],
"body_text": [
{
"text": "Given two snippets of text, Semantic Textual Similarity (STS) captures the notion that some texts are more similar than others, measuring the degree of semantic equivalence. Textual similarity can range from exact semantic equivalence to complete unrelatedness, corresponding to quantified values between 5 and 0. The graded similarity intuitively captures the notion of intermediate shades of similarity such as pairs of text differ only in some minor nuanced aspects of meaning only, to relatively important differences in meaning, to sharing only some details, or to simply being related to the same topic, as shown in Figure 1 .",
"cite_spans": [],
"ref_spans": [
{
"start": 622,
"end": 630,
"text": "Figure 1",
"ref_id": "FIGREF0"
}
],
"eq_spans": [],
"section": "Introduction",
"sec_num": "1"
},
{
"text": "One of the goals of the STS task is to create a unified framework for combining several semantic components that otherwise have historically tended to be evaluated independently and without characterization of impact on NLP applications. By providing such a framework, STS will allow for an extrinsic evaluation for these modules. Moreover, this STS framework itself could in turn be evaluated intrinsically and extrinsically as a grey/black box within various NLP applications such as Machine Translation (MT), Summarization, Generation, Question Answering (QA), etc.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Introduction",
"sec_num": "1"
},
{
"text": "STS is related to both Textual Entailment (TE) and Paraphrasing, but differs in a number of ways and it is more directly applicable to a number of NLP tasks. STS is different from TE inasmuch as it assumes bidirectional graded equivalence between the pair of textual snippets. In the case of TE the equivalence is directional, e.g. a car is a vehicle, but a vehicle is not necessarily a car. STS also differs from both TE and Paraphrasing (in as far as both tasks have been defined to date in the literature) in that, rather than being a binary yes/no decision (e.g. a vehicle is not a car), we define STS to be a graded similarity notion (e.g. a vehicle and a car are more similar than a wave and a car). A quantifiable graded bidirectional notion of textual similarity is useful for a myriad of NLP tasks such as MT evaluation, information extraction, question answering, summarization, etc.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Introduction",
"sec_num": "1"
},
{
"text": "\u2022 (5) The two sentences are completely equivalent, as they mean the same thing.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Introduction",
"sec_num": "1"
},
{
"text": "The bird is bathing in the sink. Birdie is washing itself in the water basin. \u2022 (4) The two sentences are mostly equivalent, but some unimportant details differ.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Introduction",
"sec_num": "1"
},
{
"text": "In May 2010, the troops attempted to invade Kabul. The US army invaded Kabul on May 7th last year, 2010. \u2022 (3) The two sentences are roughly equivalent, but some important information differs/missing. John said he is considered a witness but not a suspect. \"He is not a suspect anymore.\" John said. \u2022 (2) The two sentences are not equivalent, but share some details.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Introduction",
"sec_num": "1"
},
{
"text": "They flew out of the nest in groups. They flew into the nest together. \u2022 (1) The two sentences are not equivalent, but are on the same topic.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Introduction",
"sec_num": "1"
},
{
"text": "The woman is playing the violin. The young lady enjoys listening to the guitar. \u2022 (0) The two sentences are on different topics.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Introduction",
"sec_num": "1"
},
{
"text": "John went horse back riding at dawn with a whole group of friends. Sunrise at dawn is a magnificent view to take in if you wake up early enough for it. In 2012 we held the first pilot task at SemEval 2012, as part of the *SEM 2012 conference, with great success: 35 teams participated with 88 system runs (Agirre et al., 2012) . In addition, we held a DARPA sponsored workshop at Columbia University 1 . In 2013, STS was selected as the official Shared Task of the *SEM 2013 conference. Accordingly, in STS 2013, we set up two tasks: The core task CORE, which is similar to the 2012 task; and a pilot task on typed-similarity TYPED between semi-structured records.",
"cite_spans": [
{
"start": 305,
"end": 326,
"text": "(Agirre et al., 2012)",
"ref_id": "BIBREF2"
}
],
"ref_spans": [],
"eq_spans": [],
"section": "Introduction",
"sec_num": "1"
},
{
"text": "For CORE, we provided all the STS 2012 data as training data, and the test data was drawn from related but different datasets. This is in contrast to the STS 2012 task where the train/test data were drawn from the same datasets. The 2012 datasets comprised the following: pairs of sentences from paraphrase datasets from news and video elicitation (MSRpar and MSRvid), machine translation evaluation data (SMTeuroparl, SMTnews) and pairs of glosses (OnWN). The current STS 2013 dataset comprises the following: pairs of news headlines, SMT evaluation sentences (SMT) and pairs of glosses (OnWN and FNWN).",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Introduction",
"sec_num": "1"
},
{
"text": "The typed-similarity pilot task TYPED attempts to characterize, for the first time, the reason and/or type of similarity. STS reduces the problem of judging similarity to a single number, but, in some applications, it is important to characterize why and how two items are deemed similar, hence the added nuance. The dataset comprises pairs of Cultural Heritage items from Europeana, 2 a single access point to millions of books, paintings, films, museum objects and archival records that have been digitized throughout Europe. It is an authoritative source of information coming from European cultural and scientific institutions. Typically, the items comprise meta-data describing a cultural heritage item and, sometimes, a thumbnail of the item itself. Participating systems in the TYPED task need to compute the similarity between items, using the textual meta-data. In addition to general similarity, participants need to score specific kinds of similarity, like similar author, similar time period, etc. (cf. Figure 3) .",
"cite_spans": [],
"ref_spans": [
{
"start": 1015,
"end": 1024,
"text": "Figure 3)",
"ref_id": "FIGREF2"
}
],
"eq_spans": [],
"section": "Introduction",
"sec_num": "1"
},
{
"text": "The paper is structured as follows. Section 2 reports the sources of the texts used in the two tasks. Section 3 details the annotation procedure. Section 4 presents the evaluation of the systems, followed by the results of CORE and TYPED tasks. Section 6 draws on some conclusions and forward projections. 2 Source Datasets Table 1 summarizes the 2012 and 2013 datasets.",
"cite_spans": [],
"ref_spans": [
{
"start": 324,
"end": 331,
"text": "Table 1",
"ref_id": "TABREF1"
}
],
"eq_spans": [],
"section": "Introduction",
"sec_num": "1"
},
{
"text": "The CORE dataset comprises pairs of news headlines (HDL), MT evaluation sentences (SMT) and pairs of glosses (OnWN and FNWN). For HDL, we used naturally occurring news headlines gathered by the Europe Media Monitor (EMM) engine (Best et al., 2005) from several different news sources. EMM clusters together related news. Our goal was to generate a balanced data set across the different similarity ranges, hence we built two sets of headline pairs: (i) a set where the pairs come from the same EMM cluster, (ii) and another set where the headlines come from a different EMM cluster, then we computed the string similarity between those pairs. Accordingly, we sampled 375 headline pairs of headlines that occur in the same EMM cluster, aiming for pairs equally distributed between minimal and maximal similarity using simple string similarity. We sample another 375 pairs from the different EMM cluster in the same manner.",
"cite_spans": [
{
"start": 228,
"end": 247,
"text": "(Best et al., 2005)",
"ref_id": "BIBREF5"
}
],
"ref_spans": [],
"eq_spans": [],
"section": "CORE task",
"sec_num": "2.1"
},
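{
"text": "To make the balanced sampling concrete, the following is a minimal sketch in Python (not the organizers' actual script; the five-bucket scheme and the function names are assumptions, and string_sim stands for the simple string similarity mentioned above):\n\nimport random\n\ndef sample_balanced(pairs, string_sim, n=375, buckets=5):\n    # Bucket candidate headline pairs by string similarity, then draw\n    # evenly from each bucket so that the sample is spread between\n    # minimal and maximal similarity.\n    binned = [[] for _ in range(buckets)]\n    for h1, h2 in pairs:\n        s = string_sim(h1, h2)  # assumed to lie in [0, 1]\n        binned[min(int(s * buckets), buckets - 1)].append((h1, h2))\n    sample = []\n    for b in binned:\n        sample.extend(random.sample(b, min(n // buckets, len(b))))\n    return sample",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "CORE task",
"sec_num": null
},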
{
"text": "The SMT dataset comprises pairs of sentences used in machine translation evaluation. We have two different sets based on the evaluation metric used: an HTER set, and a HYTER set. Both metrics use the TER metric (Snover et al., 2006) to measure the similarity of pairs. HTER typically relies on several (1-4) reference translations. HYTER, on the other hand, leverages millions of translations. The HTER set comprises 150 pairs, where one sentence is machine translation output and the corresponding sentence is a human post-edited translation. We sample the data from the dataset used in the DARPA GALE project with an HTER score ranging from 0 to 120. The HYTER set has 600 pairs from 3 subsets (each subset contains 200 pairs): a. reference (Dreyer and Marcu, 2012) . c. machine translation vs. FST generated translation. The HYTER data set is used in (Dreyer and Marcu, 2012) .",
"cite_spans": [
{
"start": 211,
"end": 232,
"text": "(Snover et al., 2006)",
"ref_id": "BIBREF10"
},
{
"start": 854,
"end": 878,
"text": "(Dreyer and Marcu, 2012)",
"ref_id": "BIBREF6"
}
],
"ref_spans": [],
"eq_spans": [],
"section": "CORE task",
"sec_num": "2.1"
},
{
"text": "The OnWN/FnWN dataset contains gloss pairs from two sources: OntoNotes-WordNet (OnWN) and FrameNet-WordNet (FnWN). These pairs are sampled based on the string similarity ranging from 0.4 to 0.9. String similarity is used to measure the similarity between a pair of glosses. The OnWN subset comprises 561 gloss pairs from OntoNotes 4.0 (Hovy et al., 2006) and WordNet 3.0 (Fellbaum, 1998) . 370 out of the 561 pairs are sampled from the 110K sense-mapped pairs as made available from the authors. The rest, 291 pairs, are sampled from unmapped sense pairs with a string similarity ranging from 0.5 to 0.9. The FnWN subset has 189 manually mapped pairs of senses from FrameNet 1.5 (Baker et al., 1998) to WordNet 3.1. They are ran-domly selected from 426 mapped pairs. In combination, both datasets comprise 750 pairs of glosses.",
"cite_spans": [
{
"start": 335,
"end": 354,
"text": "(Hovy et al., 2006)",
"ref_id": "BIBREF8"
},
{
"start": 371,
"end": 387,
"text": "(Fellbaum, 1998)",
"ref_id": "BIBREF7"
},
{
"start": 679,
"end": 699,
"text": "(Baker et al., 1998)",
"ref_id": "BIBREF3"
}
],
"ref_spans": [],
"eq_spans": [],
"section": "CORE task",
"sec_num": "2.1"
},
{
"text": "This task is devised in the context of the PATHS project, 3 which aims to assist users in accessing digital libraries looking for items. The project tests methods that offer suggestions about items that might be useful to recommend, to assist in the interpretation of the items, and to support the user in the discovery and exploration of the collections. Hence the task is about comparing pairs of items. The pairs are generated in the Europeana project.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Typed-similarity TYPED task",
"sec_num": "2.2"
},
{
"text": "A study in the PATHS project suggested that users would be interested in knowing why the system is suggesting related items. The study suggested seven similarity types: similar author or creator, similar people involved, similar time period, similar loca- tion, similar event or action, similar subject and similar description. In addition, we also include general similarity. Figure 3 shows the definition of each similarity type as provided to the annotators.",
"cite_spans": [],
"ref_spans": [
{
"start": 377,
"end": 385,
"text": "Figure 3",
"ref_id": "FIGREF2"
}
],
"eq_spans": [],
"section": "Typed-similarity TYPED task",
"sec_num": "2.2"
},
{
"text": "The dataset is generated in semi-automatically. First, members of the project manually select 25 pairs of items for each of the 7 similarity types (excluding general similarity), totalling 175 manually selected pairs. After removing duplicates and cleaning the dataset, we got 163 pairs. Second, we use these manually selected pairs as seeds to automatically select new pairs as follows: Starting from those seeds, we use the Europeana API to get similar items, and we repeat this process 5 times in order to diverge from the original items (we stored the vis-ited items to avoid looping). Once removed from the seed set, we select the new pairs following two approaches:",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Typed-similarity TYPED task",
"sec_num": "2.2"
},
{
"text": "\u2022 Distance 1: Current item and similar item.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Typed-similarity TYPED task",
"sec_num": "2.2"
},
{
"text": "\u2022 Distance 2: Current item and an item that is similar to a similar item (twice removed distance wise) This yields 892 pairs for Distance 1 and 445 of Distance 2. We then divide the data into train and test, preserving the ratios. The train data contains 82 manually selected pairs, 446 pairs with similarity distance 1 and 222 pairs with similarity distance 2. The test data follows a similar distribution.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Typed-similarity TYPED task",
"sec_num": "2.2"
},
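{
"text": "The expansion walk can be sketched as follows; this is a simplified illustration of the procedure described above, not the actual collection script, and get_similar is a hypothetical wrapper around the Europeana API call that returns items similar to a given item:\n\ndef expand_from_seeds(seeds, hops=5):\n    # Walk away from the seed items for a fixed number of hops, storing\n    # visited items to avoid looping back to earlier ones.\n    visited = set(seeds)\n    frontier = list(seeds)\n    distance1, distance2 = [], []\n    for _ in range(hops):\n        nxt = []\n        for item in frontier:\n            for sim in get_similar(item):  # hypothetical API wrapper\n                if sim in visited:\n                    continue\n                visited.add(sim)\n                nxt.append(sim)\n                # Distance 1: current item and a similar item.\n                distance1.append((item, sim))\n                # Distance 2: current item and an item that is similar\n                # to a similar item.\n                for sim2 in get_similar(sim):\n                    if sim2 not in visited and sim2 != item:\n                        distance2.append((item, sim2))\n        frontier = nxt\n    return distance1, distance2",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Typed-similarity TYPED task",
"sec_num": null
},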
{
"text": "Europeana items cannot be redistributed, so we provide their urls and a script which uses the official Europeana API to access and extract the corresponding metadata in JSON format and a thumbnail. In addition, the textual fields which are relevant for the task are made accessible in text files, as follows:",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Typed-similarity TYPED task",
"sec_num": "2.2"
},
{
"text": "\u2022 dcTitle: title of the item \u2022 dcSubject: list of subject terms (from some vocabulary) \u2022 dcDescription: textual description of the item Figure 1 shows the explanations and values for each score between 5 and 0. We use the Crowd-Flower crowd-sourcing service to annotate the CORE dataset. Annotators are presented with the detailed instructions given in Figure 2 and are asked to label each STS sentence pair on our 6 point scale using a dropdown box. Five sentence pairs at a time are presented to annotators. Annotators are paid 0.20 cents per set of 5 annotations and we collect 5 separate annotations per sentence pair. Annotators are restricted to people from the following countries: Australia, Canada, India, New Zealand, UK, and US.",
"cite_spans": [],
"ref_spans": [
{
"start": 136,
"end": 144,
"text": "Figure 1",
"ref_id": "FIGREF0"
},
{
"start": 353,
"end": 361,
"text": "Figure 2",
"ref_id": null
}
],
"eq_spans": [],
"section": "Typed-similarity TYPED task",
"sec_num": "2.2"
},
{
"text": "To obtain high quality annotations, we create a representative gold dataset of 105 pairs that are manually annotated by the task organizers. During annotation, one gold pair is included in each set of 5 sentence pairs. Crowd annotators are required to rate 4 of the gold pairs correct to qualify to work on the task. Gold pairs are not distinguished in any way from the non-gold pairs. If the gold pairs are annotated incorrectly, annotators are told what the correct annotation is and they are given an explanation of why. CrowdFlower automatically stops low performing annotators -those with too many incorrectly labeled gold pairs -from working on the task.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Typed-similarity TYPED task",
"sec_num": "2.2"
},
{
"text": "The distribution of scores in the headlines HDL dataset is uniform, as in FNWN and OnWN, although the scores are slightly lower in FNWN and slightly higher in OnWN. The scores for SMT are not uniform, with most of the scores uniformly distributed between 3.5 and 5, a few pairs between 2 and 3.5, and nearly no pairs with values below 2.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Typed-similarity TYPED task",
"sec_num": "2.2"
},
{
"text": "The dataset is annotated using crowdsourcing. The survey contains the 1500 pairs of the dataset (750 for train and 750 for test), plus 20 gold pairs for quality control. Each participant is shown 4 training gold questions at the beginning, and then one gold every 2 or 4 questions depending on the accuracy. If accuracy dropped to less than 66.7% percent the survey is stopped and the answers from that particular annotator are discarded. Each annotator is allowed to rate a maximum of 20 pairs to avoid getting answers from people that are either tired or bored. To ensure a good comprehension of the items, the task is restricted to only accept annotators from some English speaking countries: UK, USA, Australia, Canada and New Zealand.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "TYPED task",
"sec_num": "3.2"
},
{
"text": "Participants are asked to rate the similarity between pairs of cultural heritage items from ranging from 5 to 0, following the instructions shown in Figure 3 . We also add a \"Not Applicable\" choice for cases in which annotators are not sure or didn't know. For those cases, we calculate the similarity score using the values of the rest of the annotators (if none, we convert it to 0). The instructions given to the annotators are the ones shown in Figure 3 . Figure 4 shows a pair from the dataset, as presented to annotators.",
"cite_spans": [],
"ref_spans": [
{
"start": 149,
"end": 157,
"text": "Figure 3",
"ref_id": "FIGREF2"
},
{
"start": 460,
"end": 468,
"text": "Figure 4",
"ref_id": "FIGREF3"
}
],
"eq_spans": [],
"section": "TYPED task",
"sec_num": "3.2"
},
{
"text": "The similarity scores for the pairs follow a similar distribution in all types. Most of the pairs have a score between 4 and 5, which can amount to as much as 50% of all pairs in some types.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "TYPED task",
"sec_num": "3.2"
},
{
"text": "In order to assess the annotation quality, we measure the correlation of each annotator with the average of the rest of the annotators. We then averaged all the correlations. This method to estimate the quality is identical to the method used for evaluation (see Section 4.1) and it can be thus used as the upper bound for the systems. The inter-tagger correlation in the CORE dataset for each of dataset is as follows:",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Quality of annotation",
"sec_num": "3.3"
},
{
"text": "\u2022 HDL: 85.0%",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Quality of annotation",
"sec_num": "3.3"
},
{
"text": "\u2022 FNWN: 69.9%",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Quality of annotation",
"sec_num": "3.3"
},
{
"text": "\u2022 OnWN: 87.2%",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Quality of annotation",
"sec_num": "3.3"
},
{
"text": "\u2022 SMT: 65.8% For the TYPED dataset, the inter-tagger correlation values for each type of similarity is as follows:",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Quality of annotation",
"sec_num": "3.3"
},
{
"text": "\u2022 General: 77.0%",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Quality of annotation",
"sec_num": "3.3"
},
{
"text": "\u2022 Author: 73.1%",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Quality of annotation",
"sec_num": "3.3"
},
{
"text": "\u2022 People Involved: 62.5%",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Quality of annotation",
"sec_num": "3.3"
},
{
"text": "\u2022 Time period: 72.0%",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Quality of annotation",
"sec_num": "3.3"
},
{
"text": "\u2022 Location: 74.3%",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Quality of annotation",
"sec_num": "3.3"
},
{
"text": "\u2022 Event or Action: 63.9%",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Quality of annotation",
"sec_num": "3.3"
},
{
"text": "\u2022 Subject: 74.5%",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Quality of annotation",
"sec_num": "3.3"
},
{
"text": "\u2022 Description: 74.9% In both datasets, the correlation figures are high, confirming that the task is well designed. The weakest correlations in the CORE task are SMT and FNWN. The first might reflect the fact that some automatically produced translations are confusing or difficult to understand, and the second could be caused by the special style used to gloss FrameNet concepts. In the TYPED task the weakest correlations are for the People Involved and Event or Action types, as they might be the most difficult to spot.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Quality of annotation",
"sec_num": "3.3"
},
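{
"text": "The leave-one-out computation behind these figures can be sketched as follows (a minimal illustration, not the official script; it assumes a complete annotator-by-pair score matrix, whereas the actual crowdsourced ratings, with 5 annotators per pair, would first need to be assembled into such a matrix):\n\nimport math\n\ndef pearson(x, y):\n    n = len(x)\n    mx, my = sum(x) / n, sum(y) / n\n    cov = sum((a - mx) * (b - my) for a, b in zip(x, y))\n    vx = sum((a - mx) ** 2 for a in x)\n    vy = sum((b - my) ** 2 for b in y)\n    return cov / math.sqrt(vx * vy)\n\ndef inter_tagger_correlation(scores):\n    # scores[a][i] is the rating of pair i by annotator a. Correlate\n    # each annotator with the average of the rest, then average the\n    # resulting correlations.\n    corrs = []\n    for a, own in enumerate(scores):\n        rest = [row for b, row in enumerate(scores) if b != a]\n        avg_rest = [sum(col) / len(col) for col in zip(*rest)]\n        corrs.append(pearson(own, avg_rest))\n    return sum(corrs) / len(corrs)",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Quality of annotation",
"sec_num": null
},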
{
"text": "Evaluation of STS is still an open issue. STS experiments have traditionally used Pearson productmoment correlation, or, alternatively, Spearman rank order correlation. In addition, we also need a method to aggregate the results from each dataset into an overall score. The analysis performed in (Agirre and Amig\u00f3, In prep) shows that Pearson and averaging across datasets are the best suited combination in general. In particular, Pearson is more informative than Spearman, in that Spearman only takes the rank differences into account, while Pearson does account for value differences as well. The study also showed that other alternatives need to be considered, depending on the requirements of the target application.",
"cite_spans": [
{
"start": 296,
"end": 323,
"text": "(Agirre and Amig\u00f3, In prep)",
"ref_id": null
}
],
"ref_spans": [],
"eq_spans": [],
"section": "Evaluation metrics",
"sec_num": "4.1"
},
{
"text": "We leave application-dependent evaluations for future work, and focus on average weighted Pearson correlation. When averaging, we weight each individual correlation by the size of the dataset. In addition, participants in the CORE task are allowed to provide a confidence score between 1 and 100 for each of their scores. The evaluation script down-weights the pairs with low confidence, following weighted Pearson. 4 In order to compute statistical significance among system results, we use a one-tailed parametric test based on Fisher's ztransformation (Press et al., 2002, equation 14.5.10 ).",
"cite_spans": [
{
"start": 416,
"end": 417,
"text": "4",
"ref_id": null
},
{
"start": 555,
"end": 592,
"text": "(Press et al., 2002, equation 14.5.10",
"ref_id": null
}
],
"ref_spans": [],
"eq_spans": [],
"section": "Evaluation metrics",
"sec_num": "4.1"
},
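{
"text": "The two computations just described can be sketched as follows; this is an illustration, not the official evaluation script, and the function names are ours:\n\nimport math\n\ndef weighted_pearson(x, y, w):\n    # Weighted Pearson correlation; w holds the per-pair confidence\n    # weights used to down-weight low-confidence pairs.\n    sw = sum(w)\n    mx = sum(wi * xi for wi, xi in zip(w, x)) / sw\n    my = sum(wi * yi for wi, yi in zip(w, y)) / sw\n    cov = sum(wi * (xi - mx) * (yi - my) for wi, xi, yi in zip(w, x, y)) / sw\n    vx = sum(wi * (xi - mx) ** 2 for wi, xi in zip(w, x)) / sw\n    vy = sum(wi * (yi - my) ** 2 for wi, yi in zip(w, y)) / sw\n    return cov / math.sqrt(vx * vy)\n\ndef fisher_z_pvalue(r1, r2, n1, n2):\n    # One-tailed test that correlation r1 exceeds r2, via Fisher's\n    # z-transformation (cf. Press et al., 2002, eq. 14.5.10).\n    z = (math.atanh(r1) - math.atanh(r2)) / math.sqrt(1.0 / (n1 - 3) + 1.0 / (n2 - 3))\n    # Upper-tail normal probability via the complementary error function.\n    return 0.5 * math.erfc(z / math.sqrt(2))",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Evaluation metrics",
"sec_num": null
},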
{
"text": "For the CORE dataset, we produce scores using a simple word overlap baseline system. We tokenize the input sentences splitting at white spaces, and then represent each sentence as a vector in the multidimensional token space. Each dimension has 1 if the token is present in the sentence, 0 otherwise. Vector similarity is computed using the cosine similarity metric. We also run two freely available systems, DKPro (Bar et al., 2012) and TakeLab (\u0160ari\u0107 et al., 2012) from STS 2012, 5 and evaluate them on the CORE dataset. They serve as two strong contenders since they ranked 1st (DKPro) and 2nd (TakeLab) in last year's STS task.",
"cite_spans": [
{
"start": 415,
"end": 433,
"text": "(Bar et al., 2012)",
"ref_id": "BIBREF4"
},
{
"start": 438,
"end": 466,
"text": "TakeLab (\u0160ari\u0107 et al., 2012)",
"ref_id": null
}
],
"ref_spans": [],
"eq_spans": [],
"section": "The Baseline Systems",
"sec_num": "4.2"
},
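{
"text": "The following is a minimal sketch of this token-overlap baseline in Python (illustrative, not the organizers' actual script; the whitespace tokenization and the binary vector representation follow the description above, and for binary vectors the cosine reduces to the shared-token count over the geometric mean of the set sizes):\n\nimport math\n\ndef cosine_baseline(s1, s2):\n    # Tokenize by splitting at whitespace, as described above.\n    t1, t2 = set(s1.split()), set(s2.split())\n    if not t1 or not t2:\n        return 0.0\n    # Binary vectors over the token space: the dot product is the number\n    # of shared tokens; each norm is the square root of the set size.\n    return len(t1 & t2) / (math.sqrt(len(t1)) * math.sqrt(len(t2)))\n\n# Example usage, scaling the [0, 1] cosine to the 0-5 STS range:\npair = ('The bird is bathing in the sink.', 'Birdie is washing itself in the water basin.')\nprint(5 * cosine_baseline(*pair))",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "The Baseline Systems",
"sec_num": null
},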
{
"text": "For the TYPED dataset, we first produce XML files for each of the items, using the fields as provided to participants. Then we run named entity recognition and classification (NERC) and date detection using Stanford CoreNLP. This is followed by calculating the similarity score for each of the types as follows.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "The Baseline Systems",
"sec_num": "4.2"
},
{
"text": "\u2022 General: cosine similarity of TF-IDF vectors of tokens from all fields. \u2022 Author: cosine similarity of TF-IDF vectors for dc:Creator field. \u2022 People involved, time period and location: cosine similarity of TF-IDF vectors of location/date/people recognized by NERC in all fields. \u2022 Events: cosine similarity of TF-IDF vectors of verbs in all fields. \u2022 Subject and description: cosine similarity of TF-IDF vectors of respective fields. IDF values are calculated from a subset of the Europeana collection (Culture Grid collection). We also run a random baseline several times, yielding close to 0 correlations in all datasets, as expected.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "The Baseline Systems",
"sec_num": "4.2"
},
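{
"text": "A generic sketch of the TF-IDF cosine underlying each of these type-specific scores (illustrative only; as described above, the actual baseline draws the tokens from specific metadata fields or from the NERC output, and computes the IDF values over the Culture Grid subset of Europeana):\n\nimport math\nfrom collections import Counter\n\ndef tfidf_cosine(tokens1, tokens2, idf):\n    # Build TF-IDF vectors from token lists; idf maps a token to its IDF\n    # weight (0.0 for unseen tokens).\n    v1 = {t: c * idf.get(t, 0.0) for t, c in Counter(tokens1).items()}\n    v2 = {t: c * idf.get(t, 0.0) for t, c in Counter(tokens2).items()}\n    dot = sum(w * v2.get(t, 0.0) for t, w in v1.items())\n    n1 = math.sqrt(sum(w * w for w in v1.values()))\n    n2 = math.sqrt(sum(w * w for w in v2.values()))\n    return dot / (n1 * n2) if n1 and n2 else 0.0",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "The Baseline Systems",
"sec_num": null
},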
{
"text": "Participants could send a maximum of three system runs. After downloading the test datasets, they had a maximum of 120 hours to upload the results. 34 teams participated in the CORE task, submitting 89 Table 2 : Results on the CORE task. The first rows on the left correspond to the baseline and to two publicly available systems, see text for details. Note: \u2020 signals team involving one of the organizers, \u2021 for systems submitting past the 120 hour window. system runs. For the TYPED task, 6 teams participated, submitting 14 system runs. 6 Some submissions had minor issues: one team had a confidence score of 0 for all items (we replaced them by 100), and another team had a few Not-a-Number scores for the SMT dataset, which we replaced by 5. One team submitted the results past the 120 hours. This team, and the teams that in-6 Due to lack of space we can't detail the full names of authors and institutions that participated.The interested reader can use the name of the runs in Tables 2 and 3 to find the relevant paper in these proceedings.",
"cite_spans": [
{
"start": 540,
"end": 541,
"text": "6",
"ref_id": null
}
],
"ref_spans": [
{
"start": 202,
"end": 209,
"text": "Table 2",
"ref_id": null
},
{
"start": 985,
"end": 999,
"text": "Tables 2 and 3",
"ref_id": "TABREF2"
}
],
"eq_spans": [],
"section": "Participation",
"sec_num": "4.3"
},
{
"text": "cluded one of the organizers, are explicitly marked. We want to stress that in these teams the organizers did not allow the developers of the system to access any data or information which was not available for the rest of participants. After the submission deadline expired, the organizers published the gold standard in the task website, in order to ensure a transparent evaluation process. Table 2 shows the results of the CORE task, with runs listed in alphabetical order. The correlation in each dataset is given, followed by the mean correlation (the official measure), and the rank of the run. The baseline ranks 73. The highest correlations are for OnWN (84%, by deft) and HDL (78%, by UMBC), followed by FNWN (58%, by UMBC) and SMT (40%, by NTNU). This fits nicely with the inter-tagger correlations (respectively 87, 85, 70 and 65, cf. Section 3) . It also shows that the systems get close to the human correlations in the OnWN and HDL dataset, with bigger differences for FNWN and SMT.",
"cite_spans": [
{
"start": 809,
"end": 826,
"text": "(respectively 87,",
"ref_id": null
},
{
"start": 827,
"end": 830,
"text": "85,",
"ref_id": null
},
{
"start": 831,
"end": 841,
"text": "70 and 65,",
"ref_id": null
},
{
"start": 842,
"end": 856,
"text": "cf. Section 3)",
"ref_id": null
}
],
"ref_spans": [
{
"start": 393,
"end": 400,
"text": "Table 2",
"ref_id": null
}
],
"eq_spans": [],
"section": "Participation",
"sec_num": "4.3"
},
{
"text": "The result of the best run (by UMBC) is significantly different (p-value < 0.05) than all runs except the second best. The second best run is only significantly different to the runs ranking 7th and below, and the third best to the 14th run and below. The difference between consecutive runs was not significant. This indicates that many system runs performed very close to each other.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "CORE Task Results",
"sec_num": "4.4"
},
{
"text": "Only 13 runs included non-uniform confidence scores. In 10 cases the confidence value allowed to improve performance, sometimes as much as .11 absolute points. For instance, SXUCFN-run3 improves from .4773 to .5458. The most notable exception is MayoClinicNLP-r2CDT, which achieves a mean correlation of .5879 instead of .5572 if they provide uniform confidence values.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "CORE Task Results",
"sec_num": "4.4"
},
{
"text": "The Table also shows the results of TakeLab and DKPro. We train the DKPro and TakeLab-sts12 models on all the training and test STS 2012 data. We additionally train another variant system of TakeLab, TakeLab-best, where we use targeted training where the model yields the best per-formance for each test subset as follows: (1) HDL is trained on MSRpar 2012 data; (2) OnWN is trained on all 2012 data; (3) FnWN is trained on 2012 OnWN data; (4) SMT is trained on 2012 SM-Teuroparl data. Note that Takelab-best is an upper bound, as the best combination is selected on the test dataset. TakeLab-sts12, TakeLab-best, DKPro rank as 58th, 27th and 6th in this year's system submissions, respectively. The different results yielded from TakeLab depending on the training data suggests that some STS systems are quite sensitive to the source of the sentence pairs, indicating that domain adaptation techniques could have a role in this task. On the other hand, DKPro performed extremely well when trained on all available training, with no special tweaking for each dataset. Table 3 shows the results of TYPED task. The columns show the correlation for each type of similarity, followed by the mean correlation (the official measure), and the rank of the run. The best system (from Unitor) is best in all types. The baseline ranked 8th, but the performance difference with the best system is quite significant. The best result is significantly different (p-value < 0.02) to all runs. The second and third best runs are only significantly different from the run ranking 5th and below. Note that in this dataset the correlations of the best system are higher than the inter-tagger correlations. This might indicate that the task has been solved, in the sense that the features used by the top systems are enough to characterize the problem and reach human performance, although the correlations of some types could be too low for practical use.",
"cite_spans": [],
"ref_spans": [
{
"start": 4,
"end": 14,
"text": "Table also",
"ref_id": null
},
{
"start": 1068,
"end": 1075,
"text": "Table 3",
"ref_id": "TABREF2"
}
],
"eq_spans": [],
"section": "CORE Task Results",
"sec_num": "4.4"
},
{
"text": "The organizers asked participants to submit a description file, making special emphasis on the tools and resources that were used. Tables 4 and 5 show schematically the tools and resources as reported by some of the participants for the CORE and TYPED tasks (respectively). In the last row, the totals show that WordNet and monolingual corpora were the most used resources for both tasks, followed by Wikipedia and the use of acronyms (for CORE and TYPED tasks respectively). Dictionaries, multilingual corpora, opinion and sentiment analysis, and lists and tables of paraphrases are also used.",
"cite_spans": [],
"ref_spans": [
{
"start": 131,
"end": 145,
"text": "Tables 4 and 5",
"ref_id": null
}
],
"eq_spans": [],
"section": "Tools and resources used",
"sec_num": "5"
},
{
"text": "For CORE, generic NLP tools such as lemmatization and PoS tagging are widely used, and to a lesser extent, distributional similarity, knowledgebased similarity, syntactic analysis, named entity recognition, lexical substitution and time and date resolution (in this order). Other popular tools are Semantic Role Labeling, Textual Entailment, String Similarity, Tree Kernels and Word Sense Disambiguation. Machine learning is widely used to combine and tune components (and so, it is not mentioned in the tables). Several less used tools are also listed but are used by three or less systems. The top scoring systems use most of the resources and tools listed (UMBC EBIQUITY-ParingWords, MayoClinicNLP-r3wtCD). Other well ranked systems like deft-baseline are only based on distributional similarity. Although not mentioned in the descriptions files, some systems used the publicly available DKPro and Takelab systems.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Tools and resources used",
"sec_num": "5"
},
{
"text": "For the TYPED task, the most used tools are lemmatizers, Named Entity Recognizers, and PoS taggers. Distributional and Knowledge-base similarity is also used, and at least four systems used syntactic analysis and time and date resolution. 7",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Tools and resources used",
"sec_num": "5"
},
{
"text": "We presented the 2013 *SEM shared task on Semantic Textual Similarity. 8 Two tasks were defined: a Table 5 : TYPED task: Resources and tools used by the systems that submitted a description file. Leftmost columns correspond to the resources, and rightmost to tools, in alphabetic order. core task CORE similar to the STS 2012 task, and a new pilot on typed-similarity TYPED. We had 34 teams participate in both tasks submitting 89 system runs for CORE and 14 system runs for TYPED, in total amounting to a 103 system evaluations. CORE uses datasets which are related to but different from those used in 2012: news headlines, MT evaluation data, gloss pairs. The best systems attained correlations close to the human inter tagger correlations. The TYPED task characterizes, for the first time, the reasons why two items are deemed similar. The results on TYPED show that the training data provided allowed systems to yield high correlation scores, demonstrating the practical viability of this new task. In the future, we are planning on adding more nuanced evaluation data sets that include modality (belief, negation, permission, etc.) and sentiment. Also given the success rate of the TYPED task, however, the data in this pilot is relatively structured, hence in the future we are interested in investigating identifying reasons why two pairs of unstructured texts as those present in CORE are deemed similar.",
"cite_spans": [],
"ref_spans": [
{
"start": 99,
"end": 106,
"text": "Table 5",
"ref_id": null
}
],
"eq_spans": [],
"section": "Conclusions and Future Work",
"sec_num": "6"
},
{
"text": "work was partially funded by the DARPA BOLT and DEFT programs.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Conclusions and Future Work",
"sec_num": "6"
},
{
"text": "We want to thank Nikolaos Aletras, German Rigau and Mark Stevenson for their help designing, annotating and collecting the typed-similarity data. The development of the typed-similarity dataset was supported by the PATHS project (http://paths-project.eu) funded by the European Community's Seventh Framework Program (FP7/2007 -2013 under grant agreement no. 270082. The tasks were partially financed by the READERS project under the CHIST-ERA framework (FP7 ERA-Net). We thank Europeana and all contributors to Europeana for sharing their content through the API.",
"cite_spans": [
{
"start": 308,
"end": 325,
"text": "Program (FP7/2007",
"ref_id": null
},
{
"start": 326,
"end": 331,
"text": "-2013",
"ref_id": null
}
],
"ref_spans": [],
"eq_spans": [],
"section": "Conclusions and Future Work",
"sec_num": "6"
},
{
"text": "http://www.cs.columbia.edu/\u02dcweiwei/ workshop/",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "",
"sec_num": null
},
{
"text": "http://www.europeana.eu/",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "",
"sec_num": null
},
{
"text": "http://www.paths-project.eu",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "",
"sec_num": null
},
{
"text": "http://en.wikipedia.org/wiki/Pearson_ product-moment_correlation_coefficient# Calculating_a_weighted_correlation",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "",
"sec_num": null
},
{
"text": "Code is available at http://www-nlp.stanford. edu/wiki/STS",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "",
"sec_num": null
},
{
"text": "For a more detailed analysis, the reader is directed to the papers in this volume.8 All annotations, evaluation scripts and system outputs are available in the website for the task 9 . In addition, a collabora-",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "",
"sec_num": null
}
],
"back_matter": [
{
"text": "We are grateful to the OntoNotes team for sharing OntoNotes to WordNet mappings (Hovy et al. 2006) . We thank Language Weaver, INC, DARPA and LDC for providing the SMT data. This work is also partially funded by the Spanish Ministry of Education, Culture and Sport (grant FPU12/06243). This a comprehensive list of evaluation tasks, datasets, software and papers related to STS.",
"cite_spans": [
{
"start": 80,
"end": 98,
"text": "(Hovy et al. 2006)",
"ref_id": "BIBREF8"
}
],
"ref_spans": [],
"eq_spans": [],
"section": "Acknowledgements",
"sec_num": null
},
{
"text": ". 6040 .7362 .3663 .4685 .3844 .4057 .5229 .6027 .5113 5 ECNUCS-Run2 .6064 .5684 .3663 .4685 .3844 .4057 .5563 .6027 .4948 7 PolyUCOMP-RUN1 .4888 .6940 .3223 .3820 .3621 .1625 .3962 .4816 .4112 12 PolyUCOMP-RUN2 .4893 .6940 .3253 .3777 .3628 .1968 .3962 .4816 .4155 11 PolyUCOMP-RUN3 .4915 .6940 .3254 .3737 .3667 .2207 11 2 12 54 12 5 11 36 7 3 54 3 3 48 40 2 67 14 3 3 10 24 55 3 3 4 9 6 34 9 13 6 6 ",
"cite_spans": [
{
"start": 2,
"end": 175,
"text": "6040 .7362 .3663 .4685 .3844 .4057 .5229 .6027 .5113 5 ECNUCS-Run2 .6064 .5684 .3663 .4685 .3844 .4057 .5563 .6027 .4948 7 PolyUCOMP-RUN1 .4888 .6940 .3223 .3820 .3621 .1625",
"ref_id": null
},
{
"start": 176,
"end": 247,
"text": ".3962 .4816 .4112 12 PolyUCOMP-RUN2 .4893 .6940 .3253 .3777 .3628 .1968",
"ref_id": null
},
{
"start": 248,
"end": 319,
"text": ".3962 .4816 .4155 11 PolyUCOMP-RUN3 .4915 .6940 .3254 .3737 .3667 .2207",
"ref_id": null
}
],
"ref_spans": [],
"eq_spans": [],
"section": "annex",
"sec_num": null
}
],
"bib_entries": {
"BIBREF0": {
"ref_id": "b0",
"title": "OnWN FNWN SMT Mean # baseline-tokencos",
"authors": [
{
"first": "",
"middle": [],
"last": "Head",
"suffix": ""
},
{
"first": "",
"middle": [],
"last": "Onwn",
"suffix": ""
},
{
"first": "#",
"middle": [],
"last": "Smt Mean",
"suffix": ""
},
{
"first": "",
"middle": [],
"last": "Team",
"suffix": ""
},
{
"first": "",
"middle": [],
"last": "Head",
"suffix": ""
}
],
"year": null,
"venue": "",
"volume": "",
"issue": "",
"pages": "",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Head. OnWN FNWN SMT Mean # Team and run Head. OnWN FNWN SMT Mean # baseline-tokencos .5399 .2828 .2146 .2861 .3639 73",
"links": null
},
"BIBREF1": {
"ref_id": "b1",
"title": "Exploring evaluation measures for semantic textual similarity",
"authors": [],
"year": null,
"venue": "prep",
"volume": "",
"issue": "",
"pages": "",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "References Eneko Agirre and Enrique Amig\u00f3. In prep. Exploring evaluation measures for semantic textual similarity. In Unpublished manuscript.",
"links": null
},
"BIBREF2": {
"ref_id": "b2",
"title": "Semeval-2012 task 6: A pilot on semantic textual similarity",
"authors": [
{
"first": "Eneko",
"middle": [],
"last": "Agirre",
"suffix": ""
},
{
"first": "Daniel",
"middle": [],
"last": "Cer",
"suffix": ""
},
{
"first": "Mona",
"middle": [],
"last": "Diab",
"suffix": ""
},
{
"first": "Aitor",
"middle": [],
"last": "Gonzalez-Agirre",
"suffix": ""
}
],
"year": 2012,
"venue": "*SEM 2012: The First Joint Conference on Lexical and Computational Semantics",
"volume": "1",
"issue": "",
"pages": "7--8",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Eneko Agirre, Daniel Cer, Mona Diab, and Aitor Gonzalez-Agirre. 2012. Semeval-2012 task 6: A pilot on semantic textual similarity. In *SEM 2012: The First Joint Conference on Lexical and Computa- tional Semantics -Volume 1: Proceedings of the main conference and the shared task, and Volume 2: Pro- ceedings of the Sixth International Workshop on Se- mantic Evaluation (SemEval 2012), pages 385-393, Montr\u00e9al, Canada, 7-8 June. Association for Compu- tational Linguistics.",
"links": null
},
"BIBREF3": {
"ref_id": "b3",
"title": "The berkeley framenet project",
"authors": [
{
"first": "Collin",
"middle": [
"F"
],
"last": "Baker",
"suffix": ""
},
{
"first": "Charles",
"middle": [
"J"
],
"last": "Fillmore",
"suffix": ""
},
{
"first": "John",
"middle": [
"B"
],
"last": "Lowe",
"suffix": ""
}
],
"year": 1998,
"venue": "COLING '98 Proceedings of the 17th international conference on Computational linguistics",
"volume": "1",
"issue": "",
"pages": "",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Collin F. Baker, Charles J. Fillmore, and John B. Lowe. 1998. The berkeley framenet project. In COLING '98 Proceedings of the 17th international conference on Computational linguistics -Volume 1.",
"links": null
},
"BIBREF4": {
"ref_id": "b4",
"title": "Ukp: Computing semantic textual similarity by combining multiple content similarity measures",
"authors": [
{
"first": "Daniel",
"middle": [],
"last": "Bar",
"suffix": ""
},
{
"first": "Chris",
"middle": [],
"last": "Biemann",
"suffix": ""
},
{
"first": "Iryna",
"middle": [],
"last": "Gurevych",
"suffix": ""
},
{
"first": "Torsten",
"middle": [],
"last": "Zesch",
"suffix": ""
}
],
"year": 2012,
"venue": "Proceedings of the 6th International Workshop on Semantic Evaluation, in conjunction with the 1st Joint Conference on Lexical and Computational Semantics",
"volume": "",
"issue": "",
"pages": "",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Daniel Bar, Chris Biemann, Iryna Gurevych, and Torsten Zesch. 2012. Ukp: Computing semantic textual sim- ilarity by combining multiple content similarity mea- sures. In Proceedings of the 6th International Work- shop on Semantic Evaluation, in conjunction with the 1st Joint Conference on Lexical and Computational Semantics.",
"links": null
},
"BIBREF5": {
"ref_id": "b5",
"title": "Europe media monitorsystem description",
"authors": [
{
"first": "Clive",
"middle": [],
"last": "Best",
"suffix": ""
},
{
"first": "Erik",
"middle": [],
"last": "Van Der Goot",
"suffix": ""
},
{
"first": "Ken",
"middle": [],
"last": "Blackler",
"suffix": ""
}
],
"year": 2005,
"venue": "EUR Report 22173-En",
"volume": "",
"issue": "",
"pages": "",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Clive Best, Erik van der Goot, Ken Blackler, Tefilo Gar- cia, and David Horby. 2005. Europe media monitor - system description. In EUR Report 22173-En, Ispra, Italy.",
"links": null
},
"BIBREF6": {
"ref_id": "b6",
"title": "Hyter: Meaning-equivalent semantics for translation evaluation",
"authors": [
{
"first": "Markus",
"middle": [],
"last": "Dreyer",
"suffix": ""
},
{
"first": "Daniel",
"middle": [],
"last": "Marcu",
"suffix": ""
}
],
"year": 2012,
"venue": "Human Language Technologies: Conference of the North American Chapter of the Association of Computational Linguistics",
"volume": "",
"issue": "",
"pages": "",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Markus Dreyer and Daniel Marcu. 2012. Hyter: Meaning-equivalent semantics for translation evalua- tion. In Human Language Technologies: Conference of the North American Chapter of the Association of Computational Linguistics.",
"links": null
},
"BIBREF7": {
"ref_id": "b7",
"title": "WordNet: An Electronic Lexical Database",
"authors": [
{
"first": "Christiane",
"middle": [],
"last": "Fellbaum",
"suffix": ""
}
],
"year": 1998,
"venue": "",
"volume": "",
"issue": "",
"pages": "",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Christiane Fellbaum. 1998. WordNet: An Electronic Lexical Database. MIT Press.",
"links": null
},
"BIBREF8": {
"ref_id": "b8",
"title": "Ontonotes: The 90% solution",
"authors": [
{
"first": "Eduard",
"middle": [],
"last": "Hovy",
"suffix": ""
},
{
"first": "Mitchell",
"middle": [],
"last": "Marcus",
"suffix": ""
},
{
"first": "Martha",
"middle": [],
"last": "Palmer",
"suffix": ""
},
{
"first": "Lance",
"middle": [],
"last": "Ramshaw",
"suffix": ""
},
{
"first": "Ralph",
"middle": [],
"last": "Weischedel",
"suffix": ""
}
],
"year": 2006,
"venue": "Proceedings of the Human Language Technology Conference of the North American Chapter of the ACL",
"volume": "",
"issue": "",
"pages": "",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Eduard Hovy, Mitchell Marcus, Martha Palmer, Lance Ramshaw, and Ralph Weischedel. 2006. Ontonotes: The 90% solution. In Proceedings of the Human Lan- guage Technology Conference of the North American Chapter of the ACL.",
"links": null
},
"BIBREF9": {
"ref_id": "b9",
"title": "Numerical Recipes: The Art of Scientific Computing V 2.10 With Linux Or Single-Screen License",
"authors": [
{
"first": "W",
"middle": [
"H"
],
"last": "Press",
"suffix": ""
},
{
"first": "S",
"middle": [
"A"
],
"last": "Teukolsky",
"suffix": ""
},
{
"first": "W",
"middle": [
"T"
],
"last": "Vetterling",
"suffix": ""
},
{
"first": "B",
"middle": [
"P"
],
"last": "Flannery",
"suffix": ""
}
],
"year": 2002,
"venue": "",
"volume": "",
"issue": "",
"pages": "",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "W.H. Press, S.A. Teukolsky, W.T. Vetterling, and B.P. Flannery. 2002. Numerical Recipes: The Art of Sci- entific Computing V 2.10 With Linux Or Single-Screen License. Cambridge University Press.",
"links": null
},
"BIBREF10": {
"ref_id": "b10",
"title": "A study of translation edit rate with targeted human annotation",
"authors": [
{
"first": "Matthew",
"middle": [],
"last": "Snover",
"suffix": ""
},
{
"first": "Bonnie",
"middle": [],
"last": "Dorr",
"suffix": ""
},
{
"first": "Richard",
"middle": [],
"last": "Schwartz",
"suffix": ""
},
{
"first": "Linnea",
"middle": [],
"last": "Micciulla",
"suffix": ""
},
{
"first": "John",
"middle": [],
"last": "Makhoul",
"suffix": ""
}
],
"year": 2006,
"venue": "Proceedings of Association for Machine Translation in the Americas",
"volume": "",
"issue": "",
"pages": "",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Matthew Snover, Bonnie Dorr, Richard Schwartz, Lin- nea Micciulla, and John Makhoul. 2006. A study of translation edit rate with targeted human annotation. In Proceedings of Association for Machine Translation in the Americas.",
"links": null
},
"BIBREF11": {
"ref_id": "b11",
"title": "Takelab: Systems for measuring semantic text similarity",
"authors": [
{
"first": "Frane",
"middle": [],
"last": "\u0160ari\u0107",
"suffix": ""
},
{
"first": "Goran",
"middle": [],
"last": "Glava\u0161",
"suffix": ""
},
{
"first": "Mladen",
"middle": [],
"last": "Karan",
"suffix": ""
},
{
"first": "Jan",
"middle": [],
"last": "\u0160najder",
"suffix": ""
},
{
"first": "Bojana Dalbelo",
"middle": [],
"last": "Ba\u0161i\u0107",
"suffix": ""
}
],
"year": 2012,
"venue": "Proceedings of the Sixth International Workshop on Semantic Evaluation",
"volume": "",
"issue": "",
"pages": "441--448",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Frane\u0160ari\u0107, Goran Glava\u0161, Mladen Karan, Jan\u0160najder, and Bojana Dalbelo Ba\u0161i\u0107. 2012. Takelab: Sys- tems for measuring semantic text similarity. In Pro- ceedings of the Sixth International Workshop on Se- mantic Evaluation (SemEval 2012), pages 441-448, Montr\u00e9al, Canada, 7-8 June. Association for Compu- tational Linguistics.",
"links": null
}
},
"ref_entries": {
"FIGREF0": {
"type_str": "figure",
"text": "Annotation values with explanations and examples for the core STS task.",
"num": null,
"uris": null
},
"FIGREF1": {
"type_str": "figure",
"text": "Figure 2: Annotation instructions for CORE task",
"num": null,
"uris": null
},
"FIGREF2": {
"type_str": "figure",
"text": "Annotation instructions for TYPED task vs. machine translation. b. reference vs. Finite State Transducer (FST) generated translation",
"num": null,
"uris": null
},
"FIGREF3": {
"type_str": "figure",
"text": "TYPED pair on our survey. Only general and author similarity types are shown.",
"num": null,
"uris": null
},
"FIGREF4": {
"type_str": "figure",
"text": "\u2022 dcCreator: creator(s) of the item \u2022 dcDate: date(s) of the item \u2022 dcSource: source of the item",
"num": null,
"uris": null
},
"TABREF1": {
"type_str": "table",
"num": null,
"html": null,
"text": "Summary of STS 2012 and 2013 datasets.",
"content": "<table/>"
},
"TABREF2": {
"type_str": "table",
"num": null,
"html": null,
"text": "Results on TYPED task. The first row corresponds to the baseline. Note: \u2020 signals team involving one of the organizers.",
"content": "<table/>"
}
}
}
} |