{
"paper_id": "S13-1015",
"header": {
"generated_with": "S2ORC 1.0.0",
"date_generated": "2023-01-19T15:42:51.666263Z"
},
"title": "UMCC_DLSI: Textual Similarity based on Lexical-Semantic features",
"authors": [
{
"first": "Alexander",
"middle": [],
"last": "Ch\u00e1vez",
"suffix": "",
"affiliation": {},
"email": "alexander.chavez@umcc.cu"
},
{
"first": "Antonio",
"middle": [
"Fern\u00e1ndez"
],
"last": "Orqu\u00edn",
"suffix": "",
"affiliation": {},
"email": ""
},
{
"first": "H\u00e9ctor",
"middle": [],
"last": "D\u00e1vila",
"suffix": "",
"affiliation": {},
"email": "hector.davila@umcc.cu"
},
{
"first": "Yoan",
"middle": [],
"last": "Guti\u00e9rrez",
"suffix": "",
"affiliation": {},
"email": "yoan.gutierrez@umcc.cu"
},
{
"first": "Armando",
"middle": [],
"last": "Collazo",
"suffix": "",
"affiliation": {},
"email": "armando.collazo@umcc.cu"
},
{
"first": "Jos\u00e9",
"middle": [
"I"
],
"last": "Abreu",
"suffix": "",
"affiliation": {},
"email": "jose.abreu@umcc.cu"
},
{
"first": "Andr\u00e9s",
"middle": [],
"last": "Montoyo",
"suffix": "",
"affiliation": {},
"email": "montoyo@dlsi.ua.es"
},
{
"first": "Rafael",
"middle": [],
"last": "Mu\u00f1oz",
"suffix": "",
"affiliation": {},
"email": "rafael@dlsi.ua.es"
}
],
"year": "",
"venue": null,
"identifiers": {},
"abstract": "This paper describes the specifications and results of UMCC_DLSI system, which participated in the Semantic Textual Similarity task (STS) of SemEval-2013. Our supervised system uses different types of lexical and semantic features to train a Bagging classifier used to decide the correct option. Related to the different features we can highlight the resource ISR-WN used to extract semantic relations among words and the use of different algorithms to establish semantic and lexical similarities. In order to establish which features are the most appropriate to improve STS results we participated with three runs using different set of features. Our best run reached the position 44 in the official ranking, obtaining a general correlation coefficient of 0.61.",
"pdf_parse": {
"paper_id": "S13-1015",
"_pdf_hash": "",
"abstract": [
{
"text": "This paper describes the specifications and results of UMCC_DLSI system, which participated in the Semantic Textual Similarity task (STS) of SemEval-2013. Our supervised system uses different types of lexical and semantic features to train a Bagging classifier used to decide the correct option. Related to the different features we can highlight the resource ISR-WN used to extract semantic relations among words and the use of different algorithms to establish semantic and lexical similarities. In order to establish which features are the most appropriate to improve STS results we participated with three runs using different set of features. Our best run reached the position 44 in the official ranking, obtaining a general correlation coefficient of 0.61.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Abstract",
"sec_num": null
}
],
"body_text": [
{
"text": "SemEval-2013 (Agirre et al., 2013) presents the task Semantic Textual Similarity (STS) again. In STS, the participating systems must examine the degree of semantic equivalence between two sentences. The goal of this task is to create a unified framework for the evaluation of semantic textual similarity modules and to characterize their impact on NLP applications.",
"cite_spans": [
{
"start": 13,
"end": 34,
"text": "(Agirre et al., 2013)",
"ref_id": null
}
],
"ref_spans": [],
"eq_spans": [],
"section": "Introduction",
"sec_num": "1"
},
{
"text": "STS is related to Textual Entailment (TE) and Paraphrase tasks. The main difference is that STS assumes bidirectional graded equivalence between the pair of textual snippets.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Introduction",
"sec_num": "1"
},
{
"text": "In case of TE, the equivalence is directional (e.g. a student is a person, but a person is not necessarily a student). In addition, STS differs from TE and Paraphrase in that, rather than being a binary yes/no decision, STS is a similarity-graded notion (e.g. a student is more similar to a person than a dog to a person).",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Introduction",
"sec_num": "1"
},
{
"text": "This graded bidirectional equivalence is useful for NLP tasks such as Machine Translation (MT), Information Extraction (IE), Question Answering (QA), and Summarization. Several semantic tasks could be added as modules in the STS framework, \"such as Word Sense Disambiguation and Induction, Lexical Substitution, Semantic Role Labeling, Multiword Expression detection and handling, Anaphora and Co-reference resolution, Time and Date resolution and Named Entity, among others\" 1",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Introduction",
"sec_num": "1"
},
{
"text": "This edition of SemEval-2013 remains with the same classification approach as in its first version in 2012. The output of different systems was compared to the reference scores provided by the SemEval-2013 gold standard file, which range from five to zero according to the following criteria 2 : (5) \"The two sentences are equivalent, as they mean the same thing\". (4) \"The two sentences are mostly equivalent, but some unimportant details differ\". (3) \"The two sentences are roughly equivalent, but some important information differs/missing\". (2) \"The two sentences are not equivalent, but share some details\". (1) \"The two sentences are not equivalent, but are on the same topic\". (0) \"The two sentences are on different topics\".",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Description of 2013 pilot task",
"sec_num": "1.1"
},
{
"text": "After this introduction, the rest of the paper is organized as follows. Section 3 shows the Related Works. Section 4 presents our system architecture and description of the different runs. In section 4 we describe the different features used in our system. Results and a discussion are provided in Section 5 and finally we conclude in Section 6.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Description of 2013 pilot task",
"sec_num": "1.1"
},
{
"text": "There are more extensive literature on measuring the similarity between documents than to between sentences. Perhaps the most recently scenario is constituted by the competition of SemEval-2012 task 6: A Pilot on Semantic Textual Similarity (Aguirre and Cerd, 2012) . In SemEval-2012, there were used different tools and resources like stop word list, multilingual corpora, dictionaries, acronyms, and tables of paraphrases, \"but WordNet was the most used resource, followed by monolingual corpora and Wikipedia\" (Aguirre and Cerd, 2012) .",
"cite_spans": [
{
"start": 241,
"end": 265,
"text": "(Aguirre and Cerd, 2012)",
"ref_id": "BIBREF2"
},
{
"start": 513,
"end": 537,
"text": "(Aguirre and Cerd, 2012)",
"ref_id": "BIBREF2"
}
],
"ref_spans": [],
"eq_spans": [],
"section": "Related Works",
"sec_num": "2"
},
{
"text": "According to Aguirre, Generic NLP tools were widely used. Among those that stand out were tools for lemmatization and POS-tagging (Aguirre and Cerd, 2012) . On a smaller scale word sense disambiguation, semantic role labeling and time and date resolution. In addition, Knowledge-based and distributional methods were highly used. Aguirre and Cerd remarked on (Aguirre and Cerd, 2012 ) that alignment and/or statistical machine translation software, lexical substitution, string similarity, textual entailment and machine translation evaluation software were used to a lesser extent. It can be noted that machine learning was widely used to combine and tune components.",
"cite_spans": [
{
"start": 130,
"end": 154,
"text": "(Aguirre and Cerd, 2012)",
"ref_id": "BIBREF2"
},
{
"start": 359,
"end": 382,
"text": "(Aguirre and Cerd, 2012",
"ref_id": "BIBREF2"
}
],
"ref_spans": [],
"eq_spans": [],
"section": "Related Works",
"sec_num": "2"
},
{
"text": "Most of the knowledge-based methods \"obtain a measure of relatedness by utilizing lexical resources and ontologies such as WordNet (Miller et al., 1990b) to measure definitional overlap, term distance within a graphical taxonomy, or term depth in the taxonomy as a measure of specificity\" (Banea et al., 2012) .",
"cite_spans": [
{
"start": 131,
"end": 153,
"text": "(Miller et al., 1990b)",
"ref_id": "BIBREF22"
},
{
"start": 289,
"end": 309,
"text": "(Banea et al., 2012)",
"ref_id": null
}
],
"ref_spans": [],
"eq_spans": [],
"section": "Related Works",
"sec_num": "2"
},
{
"text": "Some scholars as in (Corley and Mihalcea, June 2005) have argue \"the fact that a comprehensive metric of text semantic similarity should take into account the relations between words, as well as the role played by the various entities involved in the interactions described by each of the two sentences\". This idea is resumed in the Principle of Compositionality, this principle posits that the meaning of a complex expression is determined by the meanings of its constituent expressions and the rules used to combine them (Werning et al., 2005) . Corley and Mihalcea in this article combined metrics of word-to-word similarity, and language models into a formula and they pose that this is a potentially good indicator of the semantic similarity of the two input texts sentences. They modeled the semantic similarity of a sentence as a function of the semantic similarity of the component words (Corley and Mihalcea, June 2005) .",
"cite_spans": [
{
"start": 20,
"end": 52,
"text": "(Corley and Mihalcea, June 2005)",
"ref_id": "BIBREF9"
},
{
"start": 523,
"end": 545,
"text": "(Werning et al., 2005)",
"ref_id": null
},
{
"start": 896,
"end": 928,
"text": "(Corley and Mihalcea, June 2005)",
"ref_id": "BIBREF9"
}
],
"ref_spans": [],
"eq_spans": [],
"section": "Related Works",
"sec_num": "2"
},
{
"text": "One of the top scoring systems at SemEval-2012 (\u0160ari\u0107 et al., 2012) tended to use most of the aforementioned resources and tools. They predict the human ratings of sentence similarity using a support-vector regression model with multiple features measuring word-overlap similarity and syntax similarity. They also compute the similarity between sentences using the semantic alignment of lemmas. First, they compute the word similarity between all pairs of lemmas from first to second sentence, using either the knowledge-based or the corpus-based semantic similarity. They named this method Greedy Lemma Aligning Overlap.",
"cite_spans": [
{
"start": 47,
"end": 67,
"text": "(\u0160ari\u0107 et al., 2012)",
"ref_id": null
}
],
"ref_spans": [],
"eq_spans": [],
"section": "Related Works",
"sec_num": "2"
},
{
"text": "Daniel B\u00e4r presented the UKP system, which performed best in the Semantic Textual Similarity (STS) task at SemEval-2012 in two out of three metrics. It uses a simple log-linear regression model, trained on the training data, to combine multiple text similarity measures of varying complexity.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Related Works",
"sec_num": "2"
},
{
"text": "As we can see in Figure 1 , our three runs begin with the pre-processing of SemEval-2013's training set. Every sentence pair is tokenized, lemmatized and POS-tagged using Freeling 2.2 tool (Atserias et al., 2006) . Afterwards, several methods and algorithms are applied in order to extract all features for our Machine Learning System (MLS). Each run uses a particular group of features. The Run 1 (named MultiSemLex) is our main run. This takes into account all extracted features and trains a model with a Bagging classifier (Breiman, 1996 ) (using REPTree). The training corpus has been provided by SemEval-2013 competition, in concrete by the Semantic Textual Similarity task.",
"cite_spans": [
{
"start": 189,
"end": 212,
"text": "(Atserias et al., 2006)",
"ref_id": null
},
{
"start": 527,
"end": 541,
"text": "(Breiman, 1996",
"ref_id": "BIBREF8"
}
],
"ref_spans": [
{
"start": 17,
"end": 25,
"text": "Figure 1",
"ref_id": "FIGREF0"
}
],
"eq_spans": [],
"section": "System architecture and description of the runs",
"sec_num": "3"
},
{
"text": "The Run 2 (named MultiLex) and Run 3 (named MultiSem) use the same classifier, but including different features. Run 2 uses (see Figure 1 ) features extracted from Lexical-Semantic Metrics (LS-M) described in section 4.1, and Lexical-Semantic Alignment (LS-A) described in section 4.2.",
"cite_spans": [],
"ref_spans": [
{
"start": 129,
"end": 137,
"text": "Figure 1",
"ref_id": "FIGREF0"
}
],
"eq_spans": [],
"section": "System architecture and description of the runs",
"sec_num": "3"
},
{
"text": "On the other hand, Run 3 uses features extracted only from Semantic Alignment (SA) described in section 4.3.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "System architecture and description of the runs",
"sec_num": "3"
},
{
"text": "As a result, we obtain three trained models capable of estimating the similarity value between two phrases.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "System architecture and description of the runs",
"sec_num": "3"
},
{
"text": "Finally, we test our system with the SemEval-2013 test set (see Table 14 with the results of our three runs). The following section describes the features extraction process.",
"cite_spans": [],
"ref_spans": [
{
"start": 64,
"end": 72,
"text": "Table 14",
"ref_id": "TABREF2"
}
],
"eq_spans": [],
"section": "System architecture and description of the runs",
"sec_num": "3"
},
{
"text": "Many times when two phrases are very similar, one sentence is to a high degree lexically overlapped by the other. Inspired by this fact, we developed various algorithms, which measure the level of overlapping by computing the quantity of matching words in a pair of phrases. In our system, we used lexical and semantic similarity measures as features for a MLS. Other features were extracted from a lexical-semantic sentence alignment and a variant using only a semantic alignment.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Description of the features used in the Machine Learning System",
"sec_num": "4"
},
{
"text": "We have used well-known string based similarity measures like: Needleman-Wunch (sequence alignment), Smith-Waterman (sequence alignment), Smith-Waterman-Gotoh, Smith-Waterman-Gotoh-Windowed-Affine, Jaro, Jaro-Winkler, Chapman-Length-Deviation, Chapman-Mean-Length, QGram-Distance, Block-Distance, Cosine Similarity, Dice Similarity, Euclidean Distance, Jaccard Similarity, Matching Coefficient, Monge-Elkan and Overlap-Coefficient. These algorithms have been obtained from an API (Application Program Interface) SimMetrics library v1.5 for .NET 2.0 3 . We obtained 17 features for our MLS from these similarity measures. Using Levenshtein's edit distance (LED), we computed also two different algorithms in order to obtain the alignment of the phrases. In the first one, we considered a value of the alignment as the LED between two sentences. Contrary to (Tatu et al., 2006) , we do not remove the punctuation or stop words from the sentences, neither consider different cost for transformation operation, and we used all the operations (deletion, insertion and substitution).",
"cite_spans": [
{
"start": 856,
"end": 875,
"text": "(Tatu et al., 2006)",
"ref_id": "BIBREF29"
}
],
"ref_spans": [],
"eq_spans": [],
"section": "Similarity measures",
"sec_num": "4.1"
},
{
"text": "The second one is a variant that we named Double Levenshtein's Edit Distance (DLED) (see Table 9 for detail). For this algorithm, we used LED to measure the distance between the phrases, but in order to compare the words, we used LED again (Fern\u00e1ndez et al., 2012; Fern\u00e1ndez Orqu\u00edn et al., 2009) .",
"cite_spans": [
{
"start": 240,
"end": 264,
"text": "(Fern\u00e1ndez et al., 2012;",
"ref_id": null
},
{
"start": 265,
"end": 295,
"text": "Fern\u00e1ndez Orqu\u00edn et al., 2009)",
"ref_id": "BIBREF13"
}
],
"ref_spans": [
{
"start": 89,
"end": 96,
"text": "Table 9",
"ref_id": null
}
],
"eq_spans": [],
"section": "Similarity measures",
"sec_num": "4.1"
},
{
"text": "Another distance we used is an extension of LED named Extended Distance (in spanish distancia extendida (DEx)) (see (Fern\u00e1ndez et al., 2012; Fern\u00e1ndez Orqu\u00edn et al., 2009) for details). This algorithm is an extension of the Levenshtein's algorithm, with which penalties are applied by considering what kind of transformation (insertion, deletion, substitution, or non-operation) and the position it was carried out, along with the character involved in the operation. In addition to the cost matrixes used by Levenshtein's algorithm, DEx also obtains the Longest Common Subsequence (LCS) (Hirschberg, 1977) and other helpful attributes for determining similarity between strings in a single iteration. It is worth noting that the inclusion of all these penalizations makes the DEx algorithm a good candidate for our approach.",
"cite_spans": [
{
"start": 116,
"end": 140,
"text": "(Fern\u00e1ndez et al., 2012;",
"ref_id": null
},
{
"start": 141,
"end": 171,
"text": "Fern\u00e1ndez Orqu\u00edn et al., 2009)",
"ref_id": "BIBREF13"
},
{
"start": 588,
"end": 606,
"text": "(Hirschberg, 1977)",
"ref_id": "BIBREF17"
}
],
"ref_spans": [],
"eq_spans": [],
"section": "Similarity measures",
"sec_num": "4.1"
},
{
"text": "In our previous work (Fern\u00e1ndez Orqu\u00edn et al., 2009) , DEx demonstrated excellent results when it was compared with other distances as (Levenshtein, 1965) , (Neeedleman and Wunsch, 1970) , (Winkler, 1999) . We also used as a feature the Minimal Semantic Distances (Breadth First Search (BFS)) obtained between the most relevant concepts of both sentences. The relevant concepts pertain to semantic resources ISR-WN (Guti\u00e9rrez et al., 2011; 2010a) , as WordNet (Miller et al., 1990a) , WordNet Affect (Strapparava and Valitutti, 2004) , SUMO (Niles and Pease, 2001 ) and Semantic Classes (Izquierdo et al., 2007) . Those concepts were obtained after having applied the Association Ratio (AR) measure between concepts and words over each sentence. (We refer reader to (Guti\u00e9rrez et al., 2010b) for a further description).",
"cite_spans": [
{
"start": 21,
"end": 52,
"text": "(Fern\u00e1ndez Orqu\u00edn et al., 2009)",
"ref_id": "BIBREF13"
},
{
"start": 135,
"end": 154,
"text": "(Levenshtein, 1965)",
"ref_id": "BIBREF20"
},
{
"start": 157,
"end": 186,
"text": "(Neeedleman and Wunsch, 1970)",
"ref_id": "BIBREF24"
},
{
"start": 189,
"end": 204,
"text": "(Winkler, 1999)",
"ref_id": "BIBREF31"
},
{
"start": 415,
"end": 439,
"text": "(Guti\u00e9rrez et al., 2011;",
"ref_id": "BIBREF16"
},
{
"start": 440,
"end": 446,
"text": "2010a)",
"ref_id": "BIBREF14"
},
{
"start": 460,
"end": 482,
"text": "(Miller et al., 1990a)",
"ref_id": null
},
{
"start": 500,
"end": 533,
"text": "(Strapparava and Valitutti, 2004)",
"ref_id": "BIBREF28"
},
{
"start": 541,
"end": 563,
"text": "(Niles and Pease, 2001",
"ref_id": "BIBREF25"
},
{
"start": 587,
"end": 611,
"text": "(Izquierdo et al., 2007)",
"ref_id": "BIBREF18"
},
{
"start": 766,
"end": 791,
"text": "(Guti\u00e9rrez et al., 2010b)",
"ref_id": "BIBREF15"
}
],
"ref_spans": [],
"eq_spans": [],
"section": "Similarity measures",
"sec_num": "4.1"
},
{
"text": "Another attribute obtained by the system was a value corresponding to the sum of the smaller distances (using QGram-Distance) between the words or the lemmas of phrase one and each word of phrase two.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Similarity measures",
"sec_num": "4.1"
},
{
"text": "Among the attributes extracted by the system was also the value of the sum of the smaller distances (using Levenshtein) among stems, chunks and entities of both phrases.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Similarity measures",
"sec_num": "4.1"
},
{
"text": "Another algorithm that we created is the Lexical-Semantic Alignment. In this algorithm, we tried to align the phrases by their lemmas. If the lemmas coincide, we look for coincidences among parts-of-speech 4 (POS), and then the phrase is realigned using both. If the words do not share the same POS, they will not be aligned. Up to this point, we have only taken into account a lexical alignment. From now on, we are going to apply a semantic variant. After all the process, the non-aligned words will be analyzed taking into account their WordNet relations (synonymy, hyponymy, hyperonymy, derivationally-related-form, similar-to, verbal group, entailment and cause-to relation); and a set of equivalences like abbreviations of months, countries, capitals, days and currency. In case of the hyperonymy and hyponymy relations, words are going to be aligned if there is a word in the first sentence that is in the same relation (hyperonymy or hyponymy) with another one in the second sentence. For the relations \"cause-to\" and \"implication\", the words will be aligned if there is a word in the first sentence that causes or implicates another one in the second sentence. All the other types of relations will be handled in a bidirectional way, that is, there is an alignment if a word of the first sentence is a synonym of another one belonging to the second one or vice versa.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Lexical-Semantic alignment",
"sec_num": "4.2"
},
{
"text": "Finally, we obtain a value we called alignment relation. This value is calculated as FAV = AW / SP.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Lexical-Semantic alignment",
"sec_num": "4.2"
},
{
"text": "Where FAV is the final alignment value, AW is the number of aligned words, and SP is the number of words of the shorter phrase. The FAV value is also another feature for our system. Other extracted attributes are the quantity of aligned words and the quantity of non-aligned words. The core of the alignment is carried out in different ways, which are obtained from several attributes. Each way can be compared by:",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Lexical-Semantic alignment",
"sec_num": "4.2"
},
{
"text": "\uf0b7 the part-of-speech. \uf0b7 the morphology and the part-of-speech. \uf0b7 the lemma and the part-of-speech. \uf0b7 the morphology, part-of-speech, and relationships of WordNet. \uf0b7 the lemma, part-of-speech, and relationships of WordNet.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Lexical-Semantic alignment",
"sec_num": "4.2"
},
{
"text": "This alignment method depends on calculating the semantic similarity between sentences based on an analysis of the relations, in ISR-WN, of the words that fix them. First, the two sentences are pre-processed with Freeling and the words are classified according to their POS, creating different groups.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Semantic Alignment",
"sec_num": "4.3"
},
{
"text": "The distance between two words will be the distance, based on WordNet, of the most probable sense of each word in the pair, on the contrary of our previously system in SemEval 2012. In that version, we assumed the selected sense after apply a double Hungarian Algorithm (Kuhn, 1955) , for more details please refer to (Fern\u00e1ndez et al., 2012) . The distance is computed according to the equation 1:",
"cite_spans": [
{
"start": 270,
"end": 282,
"text": "(Kuhn, 1955)",
"ref_id": "BIBREF19"
},
{
"start": 318,
"end": 342,
"text": "(Fern\u00e1ndez et al., 2012)",
"ref_id": null
}
],
"ref_spans": [],
"eq_spans": [],
"section": "Semantic Alignment",
"sec_num": "4.3"
},
{
"text": "EQUATION",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [
{
"start": 0,
"end": 8,
"text": "EQUATION",
"ref_id": "EQREF",
"raw_str": "( , ) = \u2211 * ( [ ], [ + 1]) = =0 ;",
"eq_num": "(1)"
}
],
"section": "Semantic Alignment",
"sec_num": "4.3"
},
{
"text": "Where is the collection of synsets corresponding to the minimum path between nodes and , is the length of subtracting one, is a function that search the relation connecting and nodes, is a weight associated to the relation searched by (see Table 1 ). Let us see the following example: \u2022 We could take the pair 99 of corpus MSRvid (from training set of SemEval-2013) with a little transformation in order to better explain our method. Original pair A: A polar bear is running towards a group of walruses. B: A polar bear is chasing a group of walruses. Transformed pair: A1: A polar bear runs towards a group of cats. B1: A whale chases a group of dogs.",
"cite_spans": [],
"ref_spans": [
{
"start": 240,
"end": 247,
"text": "Table 1",
"ref_id": "TABREF2"
}
],
"eq_spans": [],
"section": "Semantic Alignment",
"sec_num": "4.3"
},
{
"text": "Later on, using equation 1, a matrix with the distances between all groups of both phrases is created (see Table 2 ).",
"cite_spans": [],
"ref_spans": [
{
"start": 107,
"end": 114,
"text": "Table 2",
"ref_id": null
}
],
"eq_spans": [],
"section": "Relation",
"sec_num": null
},
{
"text": "runs towards group cats Using the Hungarian Algorithm (Kuhn, 1955) for Minimum Cost Assignment, each group of the first sentence is checked with each element of the second sentence, and the rest is marked as words that were not aligned.",
"cite_spans": [
{
"start": 54,
"end": 66,
"text": "(Kuhn, 1955)",
"ref_id": "BIBREF19"
}
],
"ref_spans": [],
"eq_spans": [],
"section": "GROUPS polar bear",
"sec_num": null
},
{
"text": "In the previous example the words \"toward\" and \"polar\" are the words that were not aligned, so the number of non-aligned words is two. There is only one perfect match: \"group-group\" (match with cost=0). The length of the shortest sentence is four. The Table 3 shows the results of this analysis. This process has to be repeated for nouns (see Table 4 ), verbs, adjective, adverbs, prepositions, conjunctions, pronouns, determinants, modifiers, digits and date times. On the contrary, the tables have to be created only with the similar groups of the sentences. Several attributes are extracted from the pair of sentences (see Table 3 and Table 5 ). Three attributes considering only verbs, only nouns, only adjectives, only adverbs, only prepositions, only conjunctions, only pronouns, only determinants, only modifiers, only digits, and only date times. These attributes are:",
"cite_spans": [],
"ref_spans": [
{
"start": 252,
"end": 259,
"text": "Table 3",
"ref_id": "TABREF4"
},
{
"start": 343,
"end": 350,
"text": "Table 4",
"ref_id": null
},
{
"start": 626,
"end": 633,
"text": "Table 3",
"ref_id": "TABREF4"
},
{
"start": 638,
"end": 645,
"text": "Table 5",
"ref_id": null
}
],
"eq_spans": [],
"section": "GROUPS polar bear",
"sec_num": null
},
{
"text": "\u2022 Number of exact coincidences \u2022 Total distance of matching \u2022 Number of words that do not match Many groups have particular features according to their parts-of-speech. The group of the nouns has one more feature that indicates if the two phrases have the same number (plural or singular). For this feature, we take the average of the number of each noun in the phrase as the number of the phrase.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Number of exact coincidence",
"sec_num": null
},
{
"text": "For the group of adjectives we added a feature indicating the distance between the nouns that are modified by the aligned adjectives, respectively.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Number of exact coincidence",
"sec_num": null
},
{
"text": "For the verbs, we search the nouns that precede the verb, and the nouns that follow it, and we define two groups. We calculated the distance to align each group with every pair of aligned verbs. The verbs have another feature that specifies if all verbs are in the same verbal tense.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Number of exact coincidence",
"sec_num": null
},
{
"text": "With the adverbs, we search the verb that is modified by each adverb, and we calculate their distance from all alignment pairs.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Number of exact coincidence",
"sec_num": null
},
{
"text": "With the determinants and the adverbs we detect if any of the alignment pairs are expressing negations (like don't, or do not) in both cases or not. Finally, we determine if the two phrases have the same principal action. For all these new features, we rely on the Freeling tool.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Number of exact coincidence",
"sec_num": null
},
{
"text": "As a result, we finally obtain 42 attributes from this alignment method. It is important to remark that this alignment process searches to solve, for each word from the rows (see Table 4 ) it has a corresponding word from the columns.",
"cite_spans": [],
"ref_spans": [
{
"start": 179,
"end": 186,
"text": "Table 4",
"ref_id": null
}
],
"eq_spans": [],
"section": "Number of exact coincidence",
"sec_num": null
},
{
"text": "From the alignment process, we extract different features that help us a better result of our MLS. Table 6 shows the group of features with lexical and semantic support, based on WordNet relation (named F1). Each of they were named with a prefix, a hyphen and a suffix. Table 7 describes the meaning of every prefix, and Table 8 shows the meaning of the suffixes. ",
"cite_spans": [],
"ref_spans": [
{
"start": 99,
"end": 106,
"text": "Table 6",
"ref_id": null
},
{
"start": 270,
"end": 277,
"text": "Table 7",
"ref_id": null
},
{
"start": 321,
"end": 329,
"text": "Table 8",
"ref_id": null
}
],
"eq_spans": [],
"section": "Description of the alignment feature",
"sec_num": "4.4"
},
{
"text": "NWunch, SWaterman, SWGotoh, SWGAffine, Jaro, JaroW, CLDeviation, CMLength, QGramD, BlockD, CosineS, DiceS, EuclideanD, JaccardS, MaCoef, MongeElkan, OverlapCoef. Other features we extracted were obtained from the following similarity measures (named F2) (see Table 9 for detail). We used another group named F3, with lexical measure extracted from SimMetric library (see Table 10 for detail).",
"cite_spans": [],
"ref_spans": [
{
"start": 259,
"end": 266,
"text": "Table 9",
"ref_id": null
},
{
"start": 371,
"end": 379,
"text": "Table 10",
"ref_id": "TABREF2"
}
],
"eq_spans": [],
"section": "Features",
"sec_num": null
},
{
"text": "Finally we used a group of five feature (named F4), extracted from all against all alignment (see Table 11 for detail).",
"cite_spans": [],
"ref_spans": [
{
"start": 98,
"end": 106,
"text": "Table 11",
"ref_id": "TABREF2"
}
],
"eq_spans": [],
"section": "Features",
"sec_num": null
},
{
"text": "For the training process, we used a supervised learning framework, including all the training set as a training corpus. Using ten-fold cross validation with the classifier mentioned in section 3 (experimentally selected).",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Description of the training phase",
"sec_num": "4.5"
},
{
"text": "As we can see in Table 12 , the attributes corresponding with the Test 1 (only lexical attributes) obtain 0.7534 of correlation. On the other side, the attributes of the Test 2 (lexical features with semantic support) obtain 0.7549 of correlation, and all features obtain 0.7987. Being demonstrated the necessity to tackle the problem of the similarity from a multidimensional point of view (see Test 3 in the Table 12 ). ",
"cite_spans": [],
"ref_spans": [
{
"start": 17,
"end": 25,
"text": "Table 12",
"ref_id": "TABREF2"
},
{
"start": 410,
"end": 418,
"text": "Table 12",
"ref_id": "TABREF2"
}
],
"eq_spans": [],
"section": "Description of the training phase",
"sec_num": "4.5"
},
{
"text": "Semantic Textual Similarity task of SemEval-2013 offered two official measures to rank the systems 5 : Mean-the main evaluation value, Rank-gives the rank of the submission as ordered by the \"mean\" result. SMT dataset comes from DARPA GALE HTER and HyTER. One sentence is a MT output and the other is a reference translation where a reference is generated based on human post editing. Table 13 . Test Core Datasets.",
"cite_spans": [],
"ref_spans": [
{
"start": 385,
"end": 393,
"text": "Table 13",
"ref_id": "TABREF2"
}
],
"eq_spans": [],
"section": "Result and discussion",
"sec_num": "5"
},
{
"text": "Using these measures, our second run (Run 2) obtained the best results (see Table 14 ). As we can see in Table 14 , our lexical run has obtained our best result, while at the same time our other runs obtained worse results. This demonstrates that tackling this problem by combining multiple lexical similarity measures produces better results in concordance with this specific test corpus.",
"cite_spans": [],
"ref_spans": [
{
"start": 76,
"end": 84,
"text": "Table 14",
"ref_id": "TABREF2"
},
{
"start": 105,
"end": 113,
"text": "Table 14",
"ref_id": "TABREF2"
}
],
"eq_spans": [],
"section": "Result and discussion",
"sec_num": "5"
},
{
"text": "To explain Table 14 we present following descriptions: caption in top row mean: 1-Headlines, 2-OnWN, 3-FNWN, 4-SMT and 5mean. The Run 1 is our main run, which contains the junction of all attributes (lexical and semantic attributes). Table 14 shows the results of all the runs for a different corpus from test phase. As we can see, Run 1 did not obtain the best results among our runs.",
"cite_spans": [],
"ref_spans": [
{
"start": 11,
"end": 19,
"text": "Table 14",
"ref_id": "TABREF2"
},
{
"start": 234,
"end": 242,
"text": "Table 14",
"ref_id": "TABREF2"
}
],
"eq_spans": [],
"section": "Result and discussion",
"sec_num": "5"
},
{
"text": "Otherwise, Run 3 uses more semantic analysis than Run 2, from this; Run 3 should get better results than reached over the corpus of FNWN, because this corpus is extracted from FrameNet corpus (Baker et al., 1998 ) (a semantic network). FNWN provides examples with higher semantic than lexical content.",
"cite_spans": [
{
"start": 192,
"end": 211,
"text": "(Baker et al., 1998",
"ref_id": "BIBREF5"
}
],
"ref_spans": [],
"eq_spans": [],
"section": "Result and discussion",
"sec_num": "5"
},
{
"text": "Run 3 obtained a correlation coefficient of 0.8137 for all training corpus of SemEval 2013, while Run 2 and Run 1 obtained 0.7976 and 0.8345 respectively with the same classifier (Bagging using REPTree, and cross validation with ten-folds). These results present a contradiction between test and train evaluation. We think it is consequence of some obstacles present in test corpora, for example:",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Result and discussion",
"sec_num": "5"
},
{
"text": "In the headlines corpus there is a great quantity of entities, acronyms and gentilics that we do not take into account in our system.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Result and discussion",
"sec_num": "5"
},
{
"text": "The corpus FNWN presents an imbalance with respect to the length of the phrases.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Result and discussion",
"sec_num": "5"
},
{
"text": "In OnWN -test corpus-, we believe that some evaluations are not adequate in correspondence with the training corpus. For example, in line 7 the goal proposed was 0.6, however both phrases are semantically similar. The phrases are:",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Result and discussion",
"sec_num": "5"
},
{
"text": "\u2022 the act of lifting something \u2022 the act of climbing something. We think that 0.6 is not a correct evaluation for this example. Our system result, for this particular case, was 4.794 for Run 3, and 3.814 for Run 2, and finally 3.695 for Run 1.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Result and discussion",
"sec_num": "5"
},
{
"text": "This paper has introduced a new framework for recognizing Semantic Textual Similarity, which depends on the extraction of several features that can be inferred from a conventional interpretation of a text.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Conclusion and future works",
"sec_num": "6"
},
{
"text": "As mentioned in section 3 we have conducted three different runs, these runs only differ in the type of attributes used. We can see in Table 14 that all runs obtained encouraging results. Our best run was situated at 44 th position of 90 runs of the ranking of SemEval-2013. Table 12 and Table 14 show the reached positions for the three different runs and the ranking according to the rest of the teams.",
"cite_spans": [],
"ref_spans": [
{
"start": 135,
"end": 143,
"text": "Table 14",
"ref_id": "TABREF2"
},
{
"start": 275,
"end": 297,
"text": "Table 12 and Table 14",
"ref_id": "TABREF2"
}
],
"eq_spans": [],
"section": "Conclusion and future works",
"sec_num": "6"
},
{
"text": "In our participation, we used a MLS that works with features extracted from five different strategies: String Based Similarity Measures, Semantic Similarity Measures, Lexical-Semantic Alignment and Semantic Alignment. We have conducted the semantic features extraction in a multidimensional context using the resource ISR-WN, the one that allowed us to navigate across several semantic resources (WordNet, WordNet Domains, WordNet Affect, SUMO, SentiWordNet and Semantic Classes).",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Conclusion and future works",
"sec_num": "6"
},
{
"text": "Finally, we can conclude that our system performs quite well. In our current work, we show that this approach can be used to correctly classify several examples from the STS task of SemEval-2013. Compared with the best run of the ranking (UMBC_EBIQUITY-ParingWords) (see Table 15 ) our main run has very close results in headlines (1), and SMT 4 As future work we are planning to enrich our semantic alignment method with Extended WordNet (Moldovan and Rus, 2001) , we think that with this improvement we can increase the results obtained with texts like those in OnWN test set.",
"cite_spans": [
{
"start": 439,
"end": 463,
"text": "(Moldovan and Rus, 2001)",
"ref_id": null
}
],
"ref_spans": [
{
"start": 271,
"end": 279,
"text": "Table 15",
"ref_id": "TABREF2"
}
],
"eq_spans": [],
"section": "Conclusion and future works",
"sec_num": "6"
},
{
"text": "Is important to remark that our team has been working up in collaboration with INAOE (Instituto Nacional de Astrof\u00edsica, \u00d3ptica y Electr\u00f3nica) and LIPN (Laboratoire d'Informatique de Paris-Nord), Universit\u00e9 Paris 13 universities, in order to encourage the knowledge interchange and open shared technology. Supporting this collaboration, INAOE-UPV (Instituto Nacional de Astrof\u00edsica, \u00d3ptica y Electr\u00f3nica and Universitat Polit\u00e8cnica de Val\u00e8ncia) team, in concrete in INAOE-UPVrun 3 has used our semantic distances for nouns, adjectives, verbs and adverbs, as well as lexical attributes like LevDoble, NormLevF, NormLevL and Ext (see influence of these attributes in Table 12 ).",
"cite_spans": [],
"ref_spans": [
{
"start": 665,
"end": 673,
"text": "Table 12",
"ref_id": "TABREF2"
}
],
"eq_spans": [],
"section": "Team Collaboration",
"sec_num": "6.1"
},
{
"text": "http://www.cs.york.ac.uk/semeval-2012/task6/ 2 http://www.cs.york.ac.uk/semeval-2012/task6/data/uploads/datasets/train-readme.txt",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "",
"sec_num": null
},
{
"text": "Copyright (c) 2006 by Chris Parkinson, available in http://sourceforge.net/projects/simmetrics/",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "",
"sec_num": null
},
{
"text": "(noun, verb, adjective, adverbs, prepositions, conjunctions, pronouns, determinants, modifiers, etc.)",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "",
"sec_num": null
},
{
"text": "http://ixa2.si.ehu.es/sts/index.php?option=com_content&vi ew=article&id=53&Itemid=61",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "",
"sec_num": null
}
],
"back_matter": [
{
"text": "This research work has been partially funded by the Spanish Government through the project TEXT-MESS 2.0 (TIN2009-13391-C04), \"An\u00e1lisis de Tendencias Mediante T\u00e9cnicas de Opini\u00f3n Sem\u00e1ntica\" (TIN2012-38536-C03-03) and \"T\u00e9cnicas de Deconstrucci\u00f3n en la Tecnolog\u00edas del Lenguaje Humano\" (TIN2012-31224); and by the Valencian Government through the project PROMETEO (PROMETEO/2009/199).",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Acknowledgments",
"sec_num": null
}
],
"bib_entries": {
"BIBREF1": {
"ref_id": "b1",
"title": "Shared Task: Semantic Textual Similarity including a Pilot on Typed-Similarity. *SEM 2013: The Second Joint Conference on Lexical and Computational Semantics",
"authors": [
{
"first": "M",
"middle": [],
"last": "Diab",
"suffix": ""
},
{
"first": "W",
"middle": [],
"last": "Guo",
"suffix": ""
},
{
"first": "",
"middle": [],
"last": "Sem",
"suffix": ""
}
],
"year": 2013,
"venue": "",
"volume": "",
"issue": "",
"pages": "",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "M. Diab and W. Guo. *SEM 2013 Shared Task: Semantic Textual Similarity including a Pilot on Typed-Similarity. *SEM 2013: The Second Joint Conference on Lexical and Computational Semantics, Association for Computational Linguistics, 2013.",
"links": null
},
"BIBREF2": {
"ref_id": "b2",
"title": "Task 6:A Pilot on Semantic Textual Similarity. First Join Conference on Lexical and Computational Semantic (*SEM)",
"authors": [
{
"first": "E",
"middle": [],
"last": "Aguirre",
"suffix": ""
},
{
"first": "D",
"middle": [],
"last": "Cerd",
"suffix": ""
},
{
"first": "",
"middle": [],
"last": "Semeval",
"suffix": ""
}
],
"year": 2012,
"venue": "",
"volume": "",
"issue": "",
"pages": "",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Aguirre, E. and D. Cerd. SemEval 2012 Task 6:A Pilot on Semantic Textual Similarity. First Join Conference on Lexical and Computational Semantic (*SEM), Montr\u00e9al, Canada, Association for Computational Linguistics., 2012. 385-393 p.",
"links": null
},
"BIBREF4": {
"ref_id": "b4",
"title": "FreeLing 1.3: Syntactic and semantic services in an opensource NLP library",
"authors": [
{
"first": "M",
"middle": [],
"last": "Gonz\u00e1lez",
"suffix": ""
},
{
"first": "; L",
"middle": [],
"last": "Padr\u00f3",
"suffix": ""
},
{
"first": "M",
"middle": [],
"last": "Padr\u00f3",
"suffix": ""
}
],
"year": 2006,
"venue": "Proceedings of LREC'06",
"volume": "",
"issue": "",
"pages": "",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "M. Gonz\u00e1lez; L. Padr\u00f3 and M. Padr\u00f3. FreeLing 1.3: Syntactic and semantic services in an opensource NLP library. Proceedings of LREC'06, Genoa, Italy, 2006.",
"links": null
},
"BIBREF5": {
"ref_id": "b5",
"title": "The berkeley framenet project",
"authors": [
{
"first": "C",
"middle": [
"F.; C J"
],
"last": "Baker",
"suffix": ""
},
{
"first": "J",
"middle": [
"B"
],
"last": "Fillmore",
"suffix": ""
},
{
"first": "",
"middle": [],
"last": "Lowe",
"suffix": ""
}
],
"year": 1998,
"venue": "Proceedings of the 17th international conference on Computational linguistics",
"volume": "1",
"issue": "",
"pages": "",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Baker, C. F.; C. J. Fillmore and J. B. Lowe. The berkeley framenet project. Proceedings of the 17th international conference on Computational linguistics-Volume 1, Association for Computational Linguistics, 1998. 86-90 p.",
"links": null
},
"BIBREF7": {
"ref_id": "b7",
"title": "UNT:A Supervised Synergistic Approach to SemanticText Similarity",
"authors": [
{
"first": "M",
"middle": [],
"last": "Mohler",
"suffix": ""
},
{
"first": "R",
"middle": [],
"last": "Mihalcea",
"suffix": ""
}
],
"year": 2012,
"venue": "First Joint Conference on Lexical and Computational Semantics (*SEM)",
"volume": "",
"issue": "",
"pages": "",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "M. Mohler and R. Mihalcea. UNT:A Supervised Synergistic Approach to SemanticText Similarity. First Joint Conference on Lexical and Computational Semantics (*SEM), Montr\u00e9al. Canada, Association for Computational Linguistics, 2012. 635-642 p.",
"links": null
},
"BIBREF8": {
"ref_id": "b8",
"title": "Bagging predictors Machine learning",
"authors": [
{
"first": "L",
"middle": [],
"last": "Breiman",
"suffix": ""
}
],
"year": 1996,
"venue": "",
"volume": "24",
"issue": "",
"pages": "123--140",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Breiman, L. Bagging predictors Machine learning, 1996, 24(2): 123-140.",
"links": null
},
"BIBREF9": {
"ref_id": "b9",
"title": "Measuring the Semantic Similarity of Texts, Association for Computational Linguistic",
"authors": [
{
"first": "C",
"middle": [],
"last": "Corley",
"suffix": ""
},
{
"first": "R",
"middle": [],
"last": "Mihalcea",
"suffix": ""
}
],
"year": 2005,
"venue": "Proceedings of the ACL Work shop on Empirical Modeling of Semantic Equivalence and Entailment",
"volume": "",
"issue": "",
"pages": "13--18",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Corley, C. and R. Mihalcea. Measuring the Semantic Similarity of Texts, Association for Computational Linguistic. Proceedings of the ACL Work shop on Empirical Modeling of Semantic Equivalence and Entailment, pages 13-18, June 2005.",
"links": null
},
"BIBREF12": {
"ref_id": "b12",
"title": "UMCC_DLSI: Multidimensional Lexical-Semantic Textual Similarity. {*SEM 2012}: The First Joint Conference on Lexical and Computational Semantics",
"authors": [
{
"first": "A",
"middle": [],
"last": "Gonz\u00e1lez; R. Estrada",
"suffix": ""
},
{
"first": "; Y",
"middle": [],
"last": "Casta\u00f1eda",
"suffix": ""
},
{
"first": ";",
"middle": [
"S"
],
"last": "V\u00e1zquez",
"suffix": ""
},
{
"first": "; A",
"middle": [],
"last": "Montoyo",
"suffix": ""
},
{
"first": "R",
"middle": [],
"last": "Mu\u00f1oz",
"suffix": ""
}
],
"year": 2012,
"venue": "",
"volume": "1",
"issue": "",
"pages": "608--616",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "A. Gonz\u00e1lez; R. Estrada; Y. Casta\u00f1eda; S. V\u00e1zquez; A. Montoyo and R. Mu\u00f1oz. UMCC_DLSI: Multidimensional Lexical- Semantic Textual Similarity. {*SEM 2012}: The First Joint Conference on Lexical and Computational Semantics --Volume 1: Proceedings of the main conference and the shared task, and Volume 2: Proceedings of the Sixth International Workshop on Semantic Evaluation {(SemEval 2012)}, Montreal, Canada, Association for Computational Linguistics, 2012. 608--616 p.",
"links": null
},
"BIBREF13": {
"ref_id": "b13",
"title": "Un algoritmo para la extracci\u00f3n de caracter\u00edsticas lexicogr\u00e1ficas en la comparaci\u00f3n de palabras",
"authors": [
{
"first": "A",
"middle": [
"C.; J"
],
"last": "Fern\u00e1ndez Orqu\u00edn",
"suffix": ""
},
{
"first": ";",
"middle": [
"A"
],
"last": "Blanco",
"suffix": ""
},
{
"first": "R. Mu\u00f1oz",
"middle": [],
"last": "Fundora Rolo",
"suffix": ""
},
{
"first": "",
"middle": [],
"last": "Guillena",
"suffix": ""
}
],
"year": 2009,
"venue": "IV Convenci\u00f3n Cient\u00edfica Internacional CIUM",
"volume": "",
"issue": "",
"pages": "",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Fern\u00e1ndez Orqu\u00edn, A. C.; J. D\u00edaz Blanco; A. Fundora Rolo and R. Mu\u00f1oz Guillena. Un algoritmo para la extracci\u00f3n de caracter\u00edsticas lexicogr\u00e1ficas en la comparaci\u00f3n de palabras. IV Convenci\u00f3n Cient\u00edfica Internacional CIUM, Matanzas, Cuba, 2009.",
"links": null
},
"BIBREF14": {
"ref_id": "b14",
"title": "Integration of semantic resources based on WordNet. XXVI Congreso de la Sociedad Espa\u00f1ola para el Procesamiento del Lenguaje Natural",
"authors": [
{
"first": "Y",
"middle": [
"; A"
],
"last": "Guti\u00e9rrez",
"suffix": ""
},
{
"first": "; A",
"middle": [],
"last": "Fern\u00e1ndez",
"suffix": ""
},
{
"first": "S",
"middle": [],
"last": "Montoyo",
"suffix": ""
},
{
"first": "",
"middle": [],
"last": "V\u00e1zquez",
"suffix": ""
}
],
"year": 2010,
"venue": "",
"volume": "",
"issue": "",
"pages": "1135--5948",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Guti\u00e9rrez, Y.; A. Fern\u00e1ndez; A. Montoyo and S. V\u00e1zquez. Integration of semantic resources based on WordNet. XXVI Congreso de la Sociedad Espa\u00f1ola para el Procesamiento del Lenguaje Natural, Universidad Polit\u00e9cnica de Valencia, Valencia, SEPLN 2010, 2010a. 161-168 p. 1135- 5948.",
"links": null
},
"BIBREF15": {
"ref_id": "b15",
"title": "UMCC-DLSI: Integrative resource for disambiguation task",
"authors": [
{
"first": "Y",
"middle": [
"; A"
],
"last": "Guti\u00e9rrez",
"suffix": ""
},
{
"first": "; A",
"middle": [],
"last": "Fern\u00e1ndez",
"suffix": ""
},
{
"first": "S",
"middle": [],
"last": "Montoyo",
"suffix": ""
},
{
"first": "",
"middle": [],
"last": "V\u00e1zquez",
"suffix": ""
}
],
"year": 2010,
"venue": "Proceedings of the 5th International Workshop on Semantic Evaluation",
"volume": "",
"issue": "",
"pages": "",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Guti\u00e9rrez, Y.; A. Fern\u00e1ndez; A. Montoyo and S. V\u00e1zquez. UMCC-DLSI: Integrative resource for disambiguation task. Proceedings of the 5th International Workshop on Semantic Evaluation, Uppsala, Sweden, Association for Computational Linguistics, 2010b. 427-432 p.",
"links": null
},
"BIBREF16": {
"ref_id": "b16",
"title": "Enriching the Integration of Semantic Resources based on WordNet Procesamiento del Lenguaje Natural",
"authors": [
{
"first": "Y",
"middle": [
"; A"
],
"last": "Guti\u00e9rrez",
"suffix": ""
},
{
"first": "; A",
"middle": [],
"last": "Fern\u00e1ndez",
"suffix": ""
},
{
"first": "S",
"middle": [],
"last": "Montoyo",
"suffix": ""
},
{
"first": "",
"middle": [],
"last": "V\u00e1zquez",
"suffix": ""
}
],
"year": 2011,
"venue": "",
"volume": "47",
"issue": "",
"pages": "249--257",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Guti\u00e9rrez, Y.; A. Fern\u00e1ndez; A. Montoyo and S. V\u00e1zquez Enriching the Integration of Semantic Resources based on WordNet Procesamiento del Lenguaje Natural, 2011, 47: 249-257.",
"links": null
},
"BIBREF17": {
"ref_id": "b17",
"title": "Algorithms for the longest common subsequence problem",
"authors": [
{
"first": "D",
"middle": [
"S"
],
"last": "Hirschberg",
"suffix": ""
}
],
"year": 1977,
"venue": "J. ACM",
"volume": "24",
"issue": "",
"pages": "664--675",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Hirschberg, D. S. Algorithms for the longest common subsequence problem J. ACM, 1977, 24: 664-675.",
"links": null
},
"BIBREF18": {
"ref_id": "b18",
"title": "A Proposal of Automatic Selection of Coarse-grained Semantic Classes for WSD Procesamiento del Lenguaje Natural",
"authors": [
{
"first": "R",
"middle": [
"; A"
],
"last": "Izquierdo",
"suffix": ""
},
{
"first": "G",
"middle": [],
"last": "Su\u00e1rez",
"suffix": ""
},
{
"first": "",
"middle": [],
"last": "Rigau",
"suffix": ""
}
],
"year": 2007,
"venue": "",
"volume": "39",
"issue": "",
"pages": "189--196",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Izquierdo, R.; A. Su\u00e1rez and G. Rigau A Proposal of Automatic Selection of Coarse-grained Semantic Classes for WSD Procesamiento del Lenguaje Natural, 2007, 39: 189-196.",
"links": null
},
"BIBREF19": {
"ref_id": "b19",
"title": "The Hungarian Method for the assignment problem Naval Research Logistics Quarterly",
"authors": [
{
"first": "H",
"middle": [
"W"
],
"last": "Kuhn",
"suffix": ""
}
],
"year": 1955,
"venue": "",
"volume": "2",
"issue": "",
"pages": "83--97",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Kuhn, H. W. The Hungarian Method for the assignment problem Naval Research Logistics Quarterly, 1955, 2: 83-97.",
"links": null
},
"BIBREF20": {
"ref_id": "b20",
"title": "Binary codes capable of correcting spurious insertions and deletions of ones. Problems of information Transmission",
"authors": [
{
"first": "V",
"middle": [
"I"
],
"last": "Levenshtein",
"suffix": ""
}
],
"year": 1965,
"venue": "",
"volume": "",
"issue": "",
"pages": "",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Levenshtein, V. I. Binary codes capable of correcting spurious insertions and deletions of ones. Problems of information Transmission. 1965. pp. 8-17 p.",
"links": null
},
"BIBREF22": {
"ref_id": "b22",
"title": "Introduction to WordNet: An Online",
"authors": [
{
"first": "G",
"middle": [
"A.; R"
],
"last": "Miller",
"suffix": ""
},
{
"first": "; C",
"middle": [],
"last": "Beckwith",
"suffix": ""
},
{
"first": ";",
"middle": [
"D"
],
"last": "Fellbaum",
"suffix": ""
},
{
"first": "K",
"middle": [],
"last": "Gross",
"suffix": ""
},
{
"first": "",
"middle": [],
"last": "Miller",
"suffix": ""
}
],
"year": 1990,
"venue": "Lexical Database International Journal of Lexicography",
"volume": "3",
"issue": "4",
"pages": "235--244",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Miller, G. A.; R. Beckwith; C. Fellbaum; D. Gross and K. Miller Introduction to WordNet: An On- line Lexical Database International Journal of Lexicography, 3(4):235-244., 1990b.",
"links": null
},
"BIBREF23": {
"ref_id": "b23",
"title": "Rus Explaining Answers with Extended WordNet ACL",
"authors": [
{
"first": "D",
"middle": [
"I"
],
"last": "Moldovan",
"suffix": ""
},
{
"first": "V",
"middle": [],
"last": "",
"suffix": ""
}
],
"year": 2001,
"venue": "",
"volume": "",
"issue": "",
"pages": "",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Moldovan, D. I. and V. Rus Explaining Answers with Extended WordNet ACL, 2001.",
"links": null
},
"BIBREF24": {
"ref_id": "b24",
"title": "A general method applicable to the search for similarities in the amino acid sequence of two proteins",
"authors": [
{
"first": "S",
"middle": [],
"last": "Neeedleman",
"suffix": ""
},
{
"first": "C",
"middle": [],
"last": "Wunsch",
"suffix": ""
}
],
"year": 1970,
"venue": "Mol. Biol",
"volume": "48",
"issue": "443",
"pages": "",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Neeedleman, S. and C. Wunsch A general method applicable to the search for similarities in the amino acid sequence of two proteins Mol. Biol, 1970, 48(443): 453.",
"links": null
},
"BIBREF25": {
"ref_id": "b25",
"title": "Origins of the IEEE Standard Upper Ontology. Working Notes of the IJCAI-2001 Workshop on the IEEE Standard Upper Ontology",
"authors": [
{
"first": "I",
"middle": [],
"last": "Niles",
"suffix": ""
},
{
"first": "A",
"middle": [],
"last": "Pease",
"suffix": ""
}
],
"year": 2001,
"venue": "",
"volume": "",
"issue": "",
"pages": "",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Niles, I. and A. Pease. Origins of the IEEE Standard Upper Ontology. Working Notes of the IJCAI- 2001 Workshop on the IEEE Standard Upper Ontology, Seattle, Washington, USA., 2001.",
"links": null
},
"BIBREF27": {
"ref_id": "b27",
"title": "TakeLab: Systems for Measuring Semantic Text Similarity",
"authors": [
{
"first": "J",
"middle": [],
"last": "\u0160najder",
"suffix": ""
},
{
"first": "B",
"middle": [
"D"
],
"last": "Basi\u0107",
"suffix": ""
}
],
"year": 2012,
"venue": "First Join Conference on Lexical and Computational Semantic (*SEM)",
"volume": "",
"issue": "",
"pages": "385--393",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "J. \u0160najder and B. D. Basi\u0107. TakeLab: Systems for Measuring Semantic Text Similarity. Montr\u00e9al, Canada, First Join Conference on Lexical and Computational Semantic (*SEM), pages 385-393. Association for Computational Linguistics., 2012.",
"links": null
},
"BIBREF28": {
"ref_id": "b28",
"title": "WordNet-Affect: an affective extension of WordNet",
"authors": [
{
"first": "C",
"middle": [],
"last": "Strapparava",
"suffix": ""
},
{
"first": "A",
"middle": [],
"last": "Valitutti",
"suffix": ""
}
],
"year": 2004,
"venue": "Proceedings of the 4th International Conference on Language Resources and Evaluation (LREC 2004)",
"volume": "",
"issue": "",
"pages": "",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Strapparava, C. and A. Valitutti. WordNet-Affect: an affective extension of WordNet. Proceedings of the 4th International Conference on Language Resources and Evaluation (LREC 2004), Lisbon, 2004. 1083-1086 p.",
"links": null
},
"BIBREF29": {
"ref_id": "b29",
"title": "COGEX at the Second Recognizing Textual Entailment Challenge",
"authors": [
{
"first": "M",
"middle": [
"; B"
],
"last": "Tatu",
"suffix": ""
},
{
"first": "; J",
"middle": [],
"last": "Iles",
"suffix": ""
},
{
"first": ";",
"middle": [
"N"
],
"last": "Slavick",
"suffix": ""
},
{
"first": "D",
"middle": [],
"last": "Adrian",
"suffix": ""
},
{
"first": "",
"middle": [],
"last": "Moldovan",
"suffix": ""
}
],
"year": 2006,
"venue": "Proceedings of the Second PASCAL Recognising Textual Entailment Challenge Workshop",
"volume": "",
"issue": "",
"pages": "",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Tatu, M.; B. Iles; J. Slavick; N. Adrian and D. Moldovan. COGEX at the Second Recognizing Textual Entailment Challenge. Proceedings of the Second PASCAL Recognising Textual Entailment Challenge Workshop, Venice, Italy, 2006. 104-109 p.",
"links": null
},
"BIBREF30": {
"ref_id": "b30",
"title": "The Compositionality of Meaning and Content",
"authors": [
{
"first": "M",
"middle": [],
"last": "Werning",
"suffix": ""
},
{
"first": ";",
"middle": [
"E"
],
"last": "Machery",
"suffix": ""
},
{
"first": "G",
"middle": [],
"last": "Schurz",
"suffix": ""
}
],
"year": null,
"venue": "North and South America by Transaction Books, 2005. p. Linguistics & philosophy",
"volume": "1",
"issue": "",
"pages": "",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Werning, M.; E. Machery and G. Schurz. The Compositionality of Meaning and Content, Volume 1: Foundational issues. ontos verlag [Distributed in] North and South America by Transaction Books, 2005. p. Linguistics & philosophy, Bd. 1. 3-937202-52-8.",
"links": null
},
"BIBREF31": {
"ref_id": "b31",
"title": "The state of record linkage and current research problems",
"authors": [
{
"first": "W",
"middle": [],
"last": "Winkler",
"suffix": ""
}
],
"year": 1999,
"venue": "",
"volume": "",
"issue": "",
"pages": "",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Winkler, W. The state of record linkage and current research problems. Technical Report, Statistical Research Division, U.S, Census Bureau, 1999.",
"links": null
}
},
"ref_entries": {
"FIGREF0": {
"type_str": "figure",
"uris": null,
"num": null,
"text": "System Architecture."
},
"TABREF2": {
"type_str": "table",
"num": null,
"text": "shows the weights associated to WordNet relations between two synsets.",
"content": "<table/>",
"html": null
},
"TABREF4": {
"type_str": "table",
"num": null,
"text": "Features from the analyzed sentences.",
"content": "<table><tr><td/><td>Total Distances of</td><td>Number of</td></tr><tr><td/><td>optimal Matching</td><td>non-aligned</td></tr><tr><td/><td/><td>Words</td></tr><tr><td>1</td><td>5</td><td>2</td></tr></table>",
"html": null
},
"TABREF5": {
"type_str": "table",
"num": null,
"text": "shows features extracted from the analysis of nouns. Feature extracted from analysis of nouns.",
"content": "<table><tr><td>GROUPS</td><td>bear</td><td>group</td><td>cats</td></tr><tr><td>wale</td><td>Dist := 2</td><td/><td>Dist := 2</td></tr><tr><td>group</td><td/><td>Dist := 0</td><td/></tr><tr><td>dogs</td><td>Dist := 1</td><td/><td>Dist := 1</td></tr><tr><td colspan=\"4\">Table 4. Distances between groups of nouns.</td></tr></table>",
"html": null
},
"TABREF6": {
"type_str": "table",
"num": null,
"text": "Suffixes for describe each type of alignment.",
"content": "<table><tr><td>Features</td><td/><td/><td/></tr><tr><td colspan=\"5\">CPA_FCG, CPNA_FCG, SIM_FCG, CPA_LCG,</td></tr><tr><td colspan=\"2\">CPNA_LCG,</td><td/><td colspan=\"2\">SIM_LCG,</td><td>CPA_FCGR,</td></tr><tr><td colspan=\"3\">CPNA_FCGR,</td><td colspan=\"2\">SIM_FCGR,</td><td>CPA_LCGR,</td></tr><tr><td colspan=\"5\">CPNA_LCGR, SIM_LCGR</td></tr><tr><td colspan=\"5\">Table 6. F1. Semantic feature group.</td></tr><tr><td colspan=\"4\">Prefixes Descriptions</td></tr><tr><td>CPA</td><td colspan=\"4\">Number of aligned words.</td></tr><tr><td>CPNA</td><td colspan=\"4\">Number of non-aligned words.</td></tr><tr><td>SIM</td><td colspan=\"2\">Similarity</td><td/></tr><tr><td/><td colspan=\"4\">Table 7. Meaning of each prefixes.</td></tr><tr><td colspan=\"5\">Prefixes Compared words for\u2026</td></tr><tr><td>FCG</td><td colspan=\"4\">Morphology and POS</td></tr><tr><td>LCG</td><td colspan=\"4\">Lemma and POS</td></tr><tr><td>FCGR</td><td colspan=\"4\">Morphology, POS and WordNet relation.</td></tr><tr><td>LCGR</td><td colspan=\"4\">Lemma, POS and WordNet relation.</td></tr><tr><td>Table 8Features</td><td/><td colspan=\"3\">Descriptions</td></tr><tr><td colspan=\"2\">LevForma</td><td colspan=\"3\">Levenshtein Distance between two</td></tr><tr><td/><td/><td colspan=\"2\">phrases</td><td>comparing</td><td>words</td><td>by</td></tr><tr><td/><td/><td colspan=\"3\">morphology</td></tr><tr><td>LevLema</td><td/><td colspan=\"3\">The same as above, but now</td></tr><tr><td/><td/><td colspan=\"3\">comparing by lemma.</td></tr><tr><td>LevDoble</td><td/><td colspan=\"3\">Idem, but comparing again by</td></tr><tr><td/><td/><td colspan=\"3\">Levenshtein and accepting words</td></tr><tr><td/><td/><td colspan=\"3\">match if the distance is \u2264 2.</td></tr><tr><td>DEx</td><td/><td colspan=\"3\">Extended Distance</td></tr><tr><td colspan=\"2\">NormLevF,</td><td colspan=\"3\">Normalized forms of LevForma and</td></tr><tr><td colspan=\"2\">NormLevL</td><td colspan=\"2\">LevLema.</td></tr><tr><td colspan=\"5\">Table 9. F2. Lexical alignment measures.</td></tr></table>",
"html": null
},
"TABREF7": {
"type_str": "table",
"num": null,
"text": "Lexical Measure from SimMetrics library. Aligning all against all.",
"content": "<table><tr><td>Features</td><td>Descriptions</td></tr><tr><td>AxAQGD_L</td><td>All against all applying QGramD</td></tr><tr><td/><td>and comparing by lemmas of the</td></tr><tr><td/><td>words.</td></tr><tr><td>AxAQGD_F</td><td>Same as above, but applying</td></tr><tr><td/><td>QGramD and comparing by</td></tr><tr><td/><td>morphology.</td></tr><tr><td colspan=\"2\">AxAQGD_LF Idem, not only comparing by lemma</td></tr><tr><td/><td>but also by morphology.</td></tr><tr><td>AxALev_LF</td><td>All against all applying Levenhstein</td></tr></table>",
"html": null
},
"TABREF11": {
"type_str": "table",
"num": null,
"text": "core test datasets.",
"content": "<table><tr><td>Run</td><td>1</td><td>2</td><td>3</td><td>4</td><td>5</td><td>6</td></tr><tr><td colspan=\"7\">(First) 0.7642 0.7529 0.5818 0.3804 0.6181 1</td></tr><tr><td>(Our) RUN 2</td><td colspan=\"6\">0.6168 0.5557 0.3045 0.3407 0.4833 44</td></tr><tr><td colspan=\"7\">Table 15. Comparison with best run (SemEval 2013).</td></tr></table>",
"html": null
}
}
}
} |