{
"paper_id": "2020",
"header": {
"generated_with": "S2ORC 1.0.0",
"date_generated": "2023-01-19T13:04:58.256539Z"
},
"title": "TermEval 2020: Shared Task on Automatic Term Extraction Using the Annotated Corpora for Term Extraction Research (ACTER) Dataset",
"authors": [
{
"first": "Ayla",
"middle": [],
"last": "Rigouts Terryn",
"suffix": "",
"affiliation": {},
"email": ""
},
{
"first": "Veronique",
"middle": [],
"last": "Hoste",
"suffix": "",
"affiliation": {},
"email": ""
},
{
"first": "Patrick",
"middle": [],
"last": "Drouin",
"suffix": "",
"affiliation": {
"laboratory": "",
"institution": "Universit\u00e9 de Montr\u00e9al",
"location": {}
},
"email": "patrick.drouin@umontreal.ca"
},
{
"first": "Els",
"middle": [],
"last": "Lefever",
"suffix": "",
"affiliation": {},
"email": ""
}
],
"year": "",
"venue": null,
"identifiers": {},
"abstract": "The TermEval 2020 shared task provided a platform for researchers to work on automatic term extraction (ATE) with the same dataset: the Annotated Corpora for Term Extraction Research (ACTER). The dataset covers three languages (English, French, and Dutch) and four domains, of which the domain of heart failure was kept as a held-out test set on which final f1-scores were calculated. The aim was to provide a large, transparent, qualitatively annotated, and diverse dataset to the ATE research community, with the goal of promoting comparative research and thus identifying strengths and weaknesses of various state-of-the-art methodologies. The results show a lot of variation between different systems and illustrate how some methodologies reach higher precision or recall, how different systems extract different types of terms, how some are exceptionally good at finding rare terms, or are less impacted by term length. The current contribution offers an overview of the shared task with a comparative evaluation, which complements the individual papers by all participants.",
"pdf_parse": {
"paper_id": "2020",
"_pdf_hash": "",
"abstract": [
{
"text": "The TermEval 2020 shared task provided a platform for researchers to work on automatic term extraction (ATE) with the same dataset: the Annotated Corpora for Term Extraction Research (ACTER). The dataset covers three languages (English, French, and Dutch) and four domains, of which the domain of heart failure was kept as a held-out test set on which final f1-scores were calculated. The aim was to provide a large, transparent, qualitatively annotated, and diverse dataset to the ATE research community, with the goal of promoting comparative research and thus identifying strengths and weaknesses of various state-of-the-art methodologies. The results show a lot of variation between different systems and illustrate how some methodologies reach higher precision or recall, how different systems extract different types of terms, how some are exceptionally good at finding rare terms, or are less impacted by term length. The current contribution offers an overview of the shared task with a comparative evaluation, which complements the individual papers by all participants.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Abstract",
"sec_num": null
}
],
"body_text": [
{
"text": "Automatic Term Extraction (ATE) can be defined as the automated process of identifying terminology from a corpus of specialised texts. Despite receiving plenty of research attention, it remains a challenging task, not in the least because terms are so difficult to define. Terms are typically described as \"lexical items that represent concepts of a domain\" (Kageura and Marshman, 2019) , but such definitions leave room for many questions about the fundamental nature of terms. Since ATE is supposed to automatically identify terms from specialised text, the absence of a consensus about the basic characteristics of terms is problematic. The disagreement covers both practical aspects, such as term length and part-of-speech (POS) pattern, and theoretical considerations about the difference between words (or collocations/phrases) and terms. This poses great difficulties for many aspects of ATE, from data collection, to extraction methodology, to evaluation. Data collection, i.e. creating domain-specific corpora in which terms have been annotated, is time-and effortconsuming. When manual term annotation is involved, inter-annotator agreement is notoriously low and there is no consensus about an annotation protocol (Estop\u00e0, 2001) . This leads to a scarcity in available resources. Moreover, it means that the few available datasets are difficult to combine and compare, and often cover only a single language and domain. While the manual annotation bottleneck has often been circumvented by starting from existing resources, such as ontologies or terminological databases, specialised dictionaries, or book indexes, such strategies do not have the same advantages as manual annotation and will rarely cover all terms in an entire corpus. 
This is linked to the evaluation of ATE, for which the accepted metrics are precision (how many of the extracted terms are correct), recall (how many of the terms in the text have correctly been extracted), and f1-score (harmonic mean of the two). To calculate recall (and, therefore, also f1-score), it is necessary to know all true terms in a text. Since manual annotation is such an expensive operation, and relatively few resources are currently available, evaluation is often limited to either a single resource, or the calculation of precision. The ATE methodology itself, most notably the types of terms a system is designed to find, is impacted as well. Some of the most fundamental differences are term length (in number of tokens), term POS-pattern (sometimes only nouns and noun phrases, sometimes adjectives, adverbs, and verbs are included), and minimum term frequency. Differences which are more difficult to quantify are, for instance, how specialised or domain-specific a lexical unit needs to be before it is considered a term. These three aspects are closely related, since different systems and evaluation methods will be suited for different datasets. This combination of difficulties creates a hurdle for clear, comparative research. All of this can slow down the advance of ATE, especially now that (supervised) machine learning techniques are becoming more popular for the task. The TermEval shared task on ATE, using the ACTER Annotated Corpora for Term Extraction Research, was designed to lower these hurdles. The ACTER dataset contains specialised corpora in three languages (English, French, and Dutch) , and four domains (corruption, dressage (equitation), heart failure, and wind energy), which have been meticulously, manually annotated according to transparent guidelines. Both the texts and the annotations have been made freely available. 
The current version of the dataset presents the annotations as unstructured lists of all unique annotated terms (one term and its label per line), rather than providing the span of each occurrence of annotated terms in their context (which may be provided in future releases). The shared task brought together researchers to work on ATE with the same data and evaluation setup. It allowed a detailed comparison of dif-ferent methodologies. Standard evaluation methods (precision, recall, f1-score) were used for the basic evaluation and ranking; these are elaborated with more detailed evaluations as presented both in the current overview paper and in participants' contributions. The following sections start with a brief overview of current datasets and methodologies for ATE. In section 3, the ACTER dataset is described in some detail. The fourth section contains an overview of the shared task itself and the results. The final section is dedicated to a discussion and the conclusions.",
"cite_spans": [
{
"start": 358,
"end": 386,
"text": "(Kageura and Marshman, 2019)",
"ref_id": "BIBREF15"
},
{
"start": 1225,
"end": 1239,
"text": "(Estop\u00e0, 2001)",
"ref_id": "BIBREF8"
},
{
"start": 3350,
"end": 3378,
"text": "(English, French, and Dutch)",
"ref_id": null
}
],
"ref_spans": [],
"eq_spans": [],
"section": "Introduction",
"sec_num": "1."
},
{
"text": "Two of the most commonly used annotated datasets are GE-NIA (Kim et al., 2003) , and the ACL RD-TEC 2.0 (Qasemizadeh and Schumann, 2016), both of which are in English. GENIA is a collection of 2000 abstracts from the MEDLINE database in the domain of bio-medicine, specifically \"transcription factors in human blood cells\". Over 400k tokens were annotated by two domain experts to obtain 93,293 term annotations. The ACL-RD-TEC 2.0 contains 300 annotated abstracts from the ACL Anthology Reference Corpus. Again, two experts performed the annotation of 33k tokens, which resulted in 6818 term annotations. They claim three main advantages over GENIA: first, the domain (computational linguistics) means that ATE researchers will have a better understanding of the material. Second, the ACL RD-TEC corpus covers three decades, which allows some research of the evolution of terms. Third and finally, the annotation is more transparent, with freely available annotation guidelines and the possibility to download the annotations of both experts separately. 
There are other examples as well, such as the CRAFT corpus, another English corpus in the biomedical domain (99,907 annotations over 560k tokens) (Bada et al., 2012) , an English automotive corpus (28,656 annotations over 224,159 tokens) (Bernier-Colborne, 2012; Bernier-Colborne and Drouin, 2014), a diachronical English corpus on mechanical engineering (+10k annotations over 140k words) (Schumann and Fischer, 2016) , the TermITH French corpus on language sciences (14,544 unique validated terms found over 397,695 words) (TermITH, 2014; Billami et al., 2014) , a small German corpus on DIY, cooking, hunting and chess which focused on inter-annotator agreement between laypeople (912 annotations on which at least 5 out of 7 annotators agreed, over 3075 words) (H\u00e4tty and Schulte im Walde, 2018b) and, within the framework of the TTC project (Loginova et al., 2012) , lists of 107-159 annotated terms in corpora in seven languages and two domains (wind energy and mobile technology). While this is a non-exhaustive list, it illustrates an important and logical trend: either the created gold standard is quite large, with over 10k annotations, or it covers multiple languages and/or domains. While this is not necessarily problematic, the annotation guidelines for all of these corpora differ, and, therefore, the annotations themselves as well. That does create difficulties, since comparing ATE performance on multiple cor-pora will not necessarily reflect differences in performance between domains or languages, but may also show the contrast between the different annotation styles. The differences can be quite substantial, e.g. in GENIA and ACL RD-TEC, nested annotations are not allowed, in CRAFT they are only allowed under certain conditions, while in the TermITH project they are allowed in most cases. 
Moreover, it is important to note that the annotations of both the TermITH project and the TTC project are based on the manual annotation of ATE results, rather than manual annotations in the unprocessed text. A final remark is that some corpora have been annotated with multiple term labels or have even been annotated according to large taxonomies, while others don't make any distinctions beyond terms. As will be discussed in more detail in section 3, the ACTER dataset has been specifically designed to deal with some of the issues addressed here.",
"cite_spans": [
{
"start": 60,
"end": 78,
"text": "(Kim et al., 2003)",
"ref_id": "BIBREF18"
},
{
"start": 1201,
"end": 1220,
"text": "(Bada et al., 2012)",
"ref_id": "BIBREF2"
},
{
"start": 1445,
"end": 1473,
"text": "(Schumann and Fischer, 2016)",
"ref_id": "BIBREF34"
},
{
"start": 1580,
"end": 1595,
"text": "(TermITH, 2014;",
"ref_id": "BIBREF37"
},
{
"start": 1596,
"end": 1617,
"text": "Billami et al., 2014)",
"ref_id": "BIBREF5"
},
{
"start": 1820,
"end": 1855,
"text": "(H\u00e4tty and Schulte im Walde, 2018b)",
"ref_id": "BIBREF12"
},
{
"start": 1901,
"end": 1924,
"text": "(Loginova et al., 2012)",
"ref_id": "BIBREF21"
}
],
"ref_spans": [],
"eq_spans": [],
"section": "Manually Annotated Gold Standards for ATE",
"sec_num": "2.1."
},
{
"text": "Traditionally, three types of ATE methodologies are identified: linguistic (relying on linguistic information, such as POS-patterns and chunking), statistical (using frequencies, often compared to a reference corpus, to calculate termhood and unithood (Kageura and Umino, 1996) ), and hybrid methods (which combine the two). It has been established for some time that hybrid methods appear to outperform the other two (Macken et al., 2013) . These methods typically select candidate terms based on their POS-pattern and rank these candidate terms using the statistical metrics, thus combining the advantages of both techniques. A particular difficulty is defining the cut-off threshold for the term candidates, which can be defined as the top-n terms, the top-n percentage of terms, or all terms above a certain threshold score. Manually predicting the ideal cut-off point is extremely difficult and can result in a skew towards either precision or recall, which can be detrimental to the final f1score (Rigouts Terryn et al., 2019a) . While this typology of linguistic, statistical, and hybrid systems is sometimes still used today, in recent years, the advance of machine learning techniques has made such a simple classification of ATE methodologies more complicated (Gao and Yuan, 2019) . Methodologies have become so diverse that they are no longer easily captured in such a limited number of clearly delineated categories. For instance, apart from the distinction between statistical and linguistic systems, one could also distinguish between rulebased methods and machine learning methods. 
However, rather than a simple binary distinction, there is quite a range of options: methods that rely on a single statistical score (Drouin, 2003; Kosa et al., 2020) , systems that combine a limited number of features with a voting algorithm (Fedorenko et al., 2013; Vivaldi and Rodr\u00edguez, 2001) , an evolutionary algorithm that optimises the ROC-curve (Az\u00e9 et al., 2005) , rule-induction (Foo and Merkel, 2010), supportvector models (Ramisch et al., 2010) , logistic regression (Bolshakova et al., 2013; Judea et al., 2014) , basic neural networks (H\u00e4tty and Schulte im Walde, 2018a), recursive neural networks (Kucza et al., 2018) , siamese neural networks (Shah et al., 2019) , and convolutional neural networks (Wang et al., 2016) . Within the machine learn-ing systems, there are vast differences between supervised, semi-supervised, and unsupervised systems, as well as the distinction between sequence labelling approaches and systems that start from a limited list of unique term candidates. Splitting systems by their features is perhaps even more difficult, since research has moved far beyond using simple linguistic and statistical features. Some examples include the use of topic modelling (\u0160ajatovi\u0107 et al., 2019; Bolshakova et al., 2013) , queries on search engines, Wikipedia, or other external resources (Kessler et al., 2019; Vivaldi and Rodr\u00edguez, 2001) , and word embeddings (Amjadian et al., 2016; Kucza et al., 2018; Qasemizadeh and Handschuh, 2014; Pollak et al., 2019) . Some methods are even called \"featureless\" (Gao and Yuan, 2019; Wang et al., 2016) .",
"cite_spans": [
{
"start": 252,
"end": 277,
"text": "(Kageura and Umino, 1996)",
"ref_id": "BIBREF16"
},
{
"start": 418,
"end": 439,
"text": "(Macken et al., 2013)",
"ref_id": "BIBREF22"
},
{
"start": 1003,
"end": 1033,
"text": "(Rigouts Terryn et al., 2019a)",
"ref_id": "BIBREF32"
},
{
"start": 1270,
"end": 1290,
"text": "(Gao and Yuan, 2019)",
"ref_id": "BIBREF10"
},
{
"start": 1730,
"end": 1744,
"text": "(Drouin, 2003;",
"ref_id": "BIBREF7"
},
{
"start": 1745,
"end": 1763,
"text": "Kosa et al., 2020)",
"ref_id": "BIBREF19"
},
{
"start": 1840,
"end": 1864,
"text": "(Fedorenko et al., 2013;",
"ref_id": "BIBREF9"
},
{
"start": 1865,
"end": 1893,
"text": "Vivaldi and Rodr\u00edguez, 2001)",
"ref_id": "BIBREF38"
},
{
"start": 1951,
"end": 1969,
"text": "(Az\u00e9 et al., 2005)",
"ref_id": "BIBREF1"
},
{
"start": 2032,
"end": 2054,
"text": "(Ramisch et al., 2010)",
"ref_id": null
},
{
"start": 2077,
"end": 2102,
"text": "(Bolshakova et al., 2013;",
"ref_id": "BIBREF6"
},
{
"start": 2103,
"end": 2122,
"text": "Judea et al., 2014)",
"ref_id": "BIBREF14"
},
{
"start": 2210,
"end": 2230,
"text": "(Kucza et al., 2018)",
"ref_id": "BIBREF20"
},
{
"start": 2257,
"end": 2276,
"text": "(Shah et al., 2019)",
"ref_id": "BIBREF35"
},
{
"start": 2313,
"end": 2332,
"text": "(Wang et al., 2016)",
"ref_id": "BIBREF40"
},
{
"start": 2801,
"end": 2825,
"text": "(\u0160ajatovi\u0107 et al., 2019;",
"ref_id": null
},
{
"start": 2826,
"end": 2850,
"text": "Bolshakova et al., 2013)",
"ref_id": "BIBREF6"
},
{
"start": 2919,
"end": 2941,
"text": "(Kessler et al., 2019;",
"ref_id": "BIBREF17"
},
{
"start": 2942,
"end": 2970,
"text": "Vivaldi and Rodr\u00edguez, 2001)",
"ref_id": "BIBREF38"
},
{
"start": 2993,
"end": 3016,
"text": "(Amjadian et al., 2016;",
"ref_id": "BIBREF0"
},
{
"start": 3017,
"end": 3036,
"text": "Kucza et al., 2018;",
"ref_id": "BIBREF20"
},
{
"start": 3037,
"end": 3069,
"text": "Qasemizadeh and Handschuh, 2014;",
"ref_id": "BIBREF28"
},
{
"start": 3070,
"end": 3090,
"text": "Pollak et al., 2019)",
"ref_id": "BIBREF27"
},
{
"start": 3136,
"end": 3156,
"text": "(Gao and Yuan, 2019;",
"ref_id": "BIBREF10"
},
{
"start": 3157,
"end": 3175,
"text": "Wang et al., 2016)",
"ref_id": "BIBREF40"
}
],
"ref_spans": [],
"eq_spans": [],
"section": "ATE",
"sec_num": "2.2."
},
{
"text": "There are many more ways in which ATE systems can vary. Some can already be deduced from the ways in which the datasets are annotated, such as support for nested terms. Another very fundamental difference is the frequency cutoff: many ATE systems only extract terms which appear above a certain frequency threshold in the corpora. This threshold is extremely variable, with some systems that do not have any threshold, others that only extract candidate terms which appear 15 times or more (Pollak et al., 2019) , and still others where only the top-n most frequent terms are extracted (Loukachevitch, 2012). Term length is similarly variable, with systems that don't place any restrictions, others that extract only single-word terms, only multi-word terms, or those that extract all terms between 1 and n tokens (with n ranging from 2 to 15), where n is sometimes determined by the restrictions of a system, sometimes experimentally set to an optimal value, and at other times directly determined by the maximum term length in a gold standard. There are many other possible differences, such as POS patterns, which will not be discussed in any detail here. More information regarding both datasets for ATE and different ATE methodologies can be found in Rigouts Terryn et al. (2019b) .",
"cite_spans": [
{
"start": 490,
"end": 511,
"text": "(Pollak et al., 2019)",
"ref_id": "BIBREF27"
},
{
"start": 1264,
"end": 1285,
"text": "Terryn et al. (2019b)",
"ref_id": "BIBREF33"
}
],
"ref_spans": [],
"eq_spans": [],
"section": "ATE",
"sec_num": "2.2."
},
{
"text": "With such a great variety of methodologies, comparative research is essential to identify the strengths and weaknesses of the respective strategies. However, as discussed, appropriate datasets are scarce and often limited. This means that ATE systems are regularly scored solely on precision (or some variation thereof), since recall and f1-score cannot be calculated without knowing all true terms in a corpus.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "ATE",
"sec_num": "2.2."
},
{
"text": "Considering the expense of data annotation, the extra effort required is rarely feasible. The strictness of the evaluation varies as well, such as determining how specialised a term candidate needs to be for it to be considered a true term, and validating only full matches or also partial ones. Moreover, scores for sequence labelling approaches are difficult to compare to scores for approaches that provide ranked lists of unique terms. There is even disagreement on the required expertise for annotators: do they need to be domain experts or terminologists? This disparity does not only make comparisons between systems highly problematic, it also means that many systems are evaluated on only a single domain (and language).",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "ATE",
"sec_num": "2.2."
},
{
"text": "ACTER is a collection of domain-specific corpora in which terms have been manually annotated. It covers three languages (English, French, and Dutch) and four domains (corruption, dressage (equitation), heart failure, and wind energy). It has been created in light of some of the perceived difficulties that have been mentioned. A previous version (which did not yet bear the ACTER acronym) has already been elaborately described (Rigouts Terryn et al., 2019b), so we refer the interested reader to this work for more detailed information. However, the current version of the dataset has been substantially updated since then, to be even more consistent. All previous annotations have been double-checked, inconsistent annotations were automatically found and manually edited when necessary, and, with this shared task, a first version has been made publicly available. Therefore, the remainder of this section will focus on the up-to-date statistics of version 1.2 of the ACTER dataset (version 1.0 was the first to appear online for the shared task). The annotation guidelines have been updated as well and are freely available 1 . Discontinuous terms (e.g. in ellipses) have been annotated, but are not yet included in ACTER 1.2, and neither are the cross-lingual annotations in the domain of heart failure. The changes made between ACTER versions are indicated in detail in the included README.md file and the biggest difference between version 1.0 and 1.2 (besides some 120 removed or added annotations) is the inclusion of the label of each annotation. The dataset contains trilingual comparable corpora in all domains: the corpora in the same domain are similar in terms of subject, style, and length for each language, but they are not translations (and, therefore, cannot be aligned). Additionally, for the domain of corruption, there is a trilingual parallel corpus of aligned translations. 
For each language and domain, around 50k tokens have been manually annotated (in the case of corruption, the annotations have only been made in the parallel corpus, so the comparable corpus on corruption is completely unannotated). In all domains except heart failure, the complete corpora are larger than only the annotated parts, and unannotated texts are included (separately) as well. The texts are all plain text files and the sources have been included in the downloadable version. The annotations have been performed in the BRAT annotation tool (Stenetorp et al., 2011) , but they are currently provided as flat lists with one term per line. The annotations have all been performed by a single annotator with experience in the field of terminology and ATE and fluent in all three languages. However, she is not a domainexpert, except in the domain of dressage. Multiple semiautomatic checks have been performed to ensure the best possible annotation quality and inter-annotator agreement studies were performed and published (Rigouts Terryn et al., 2019b) to further validate the dataset. Furthermore, the elaborate guidelines helped the annotator to make consistent decisions and make the entire process more transparent. Nevertheless, term annotation remains an ambiguous bioprosthetic valve replacement Specific Term biopsies Common Term biopsy Common Term biosynthetic enzymes Specific Term bisoprolol Specific Term bisphosphonates Specific Term Table 1 : Sample of one of the gold standard term lists in the ACTER 1.2 dataset to illustrate the format and subjective task. We do not claim that ours is the only possible interpretation and, therefore, when using ACTER for ATE evaluation purposes, always recommend checking the output for a more nuanced evaluation (e.g. Rigouts Terryn et al. (2019a)). While ATE for TermEval has been perceived as a binary task (term or not), the original annotations included four different labels. 
There are three term labels, for which terms are defined by their degree of domain-specificity (are they relevant to the domain) and lexicon-specificity (are they known only by experts, or by laypersons as well).",
"cite_spans": [
{
"start": 2452,
"end": 2476,
"text": "(Stenetorp et al., 2011)",
"ref_id": "BIBREF36"
},
{
"start": 2941,
"end": 2962,
"text": "Terryn et al., 2019b)",
"ref_id": "BIBREF33"
}
],
"ref_spans": [
{
"start": 3213,
"end": 3376,
"text": "Specific Term biopsies Common Term biopsy Common Term biosynthetic enzymes Specific Term bisoprolol Specific Term bisphosphonates Specific Term Table 1",
"ref_id": null
}
],
"eq_spans": [],
"section": "ACTER Annotated Corpora for Term Extraction Research",
"sec_num": "3."
},
{
"text": "The three term labels defined this way are: Specific Terms (which are both domain-and lexicon-specific), Common Terms (domain-specific, not lexicon-specific), and Outof-Domain (OOD) Terms (not domain-specific, lexiconspecific). In the domain of heart failure, for instance, ejection fraction might be a Specific Term: laypersons generally do not know what it means, and it is strongly related to the domain of heart failure, since it is an indication of the volume of blood the heart pumps on each contraction. Heart is an example of a Common Term: it is clearly domain-specific to heart failure and you do not need to be an expert to have a basic idea of what a heart is. An example of an OOD term might be p-value, which is lexicon-specific since you need some knowledge of statistics to know the term, but it is not domain-specific to heart failure. In addition to these three term labels, Named Entities (proper names of persons, organisations, etc.) were annotated as well, as they share a few characteristics with terms: they will appear more often in texts with a relevant subject (e.g. brand names of medicine in the field of heart failure) and, like multi-word terms, have a high degree of unithood (internal cohesion). Labelling these does not mean we consider them to be terms, but it offers more options for the evaluation and training based on the dataset. Since TermEval was set up as a binary task, all three term labels were combined and considered as true terms. There were two separate datasets regarding the Named Entities: one including both terms and Named Entities, one with only terms. All participating systems were evaluated on both datasets. Moreover, while the evaluation for the ranking of the participating systems was based only on these two binary interpretations, the four labels were made available afterwards for a more detailed evaluation of the results. 
The gold standard lists of terms were ordered alphabetically, so with no relation to their labels or degree of termhood. Table 1 shows a sample of such a gold standard list, with one unique term per line followed by its label. Tables 2 and 3 provide more details on ACTER 1.2. Ta-ble 2 shows the number of documents and words per corpus, both in the entire corpus and only the annotated part of the corpus. Table 3 provides details on the number of annotations per corpus, counting either all annotations or all unique annotations. In total, 119,455 term and Named Entity annotations have been made over 596,058 words, resulting in 19,002 unique annotations. As can be seen, the number of annotations within a domain is usually somewhat similar for all languages (since the corpora are comparable), with larger differences between the domains. Version 1.2 of ACTER only provides a list of all unique lowercased terms (and Named Entities) per corpus. The aim is to release future versions with all in-text annotation spans, where every occurrence of each term is annotated, so that it can be used for sequence-labelling approaches as well. It is important to note that, since the annotation process was completely manual, each occurrence of a term was evaluated separately. When a lexical unit was only considered a term in some contexts, it was only annotated in those specific contexts. For instance, the word sensitivity can be used in general language, where it will not be annotated, but also as a synonym of recall (true positive rate), in which case it was annotated as a term. Additional characteristics to bear in mind about these annotations are that nested annotations are allowed (as long as the nested part can be used as a term on its own), and that there were no restrictions on term length, term frequency, or term POS-pattern (apart from the condition that terms had to contain a content word). 
If a lexical unit was used as a term in the text, it was annotated, even if it was not the best or most frequently used term for a certain concept. The reasoning behind this strategy was that one of the most important applications of ATE is to be able to keep up with fast-evolving terminology in increasingly more specialised domains. If only well-established, frequent terms are annotated, the rare and/or new terms will be ignored, even though these could be particularly interesting for ATE. While these qualities were all chosen to best reflect the desired applications for ATE, they do result in a particularly difficult dataset for ATE, so f1-scores for ATE systems tested on ACTER are expected to be rather modest in comparison to some other datasets.",
"cite_spans": [],
"ref_spans": [
{
"start": 2011,
"end": 2018,
"text": "Table 1",
"ref_id": null
},
{
"start": 2117,
"end": 2131,
"text": "Tables 2 and 3",
"ref_id": "TABREF1"
},
{
"start": 2167,
"end": 2175,
"text": "Ta-ble 2",
"ref_id": "TABREF1"
},
{
"start": 2297,
"end": 2304,
"text": "Table 3",
"ref_id": "TABREF2"
}
],
"eq_spans": [],
"section": "ACTER Annotated Corpora for Term Extraction Research",
"sec_num": "3."
},
{
"text": "The aim of the TermEval shared task was to provide a platform for researchers to work on the same task, with the same data, so that different methodologies for ATE can easily be compared and current strengths and weaknesses of ATE can be identified. During the training phase, participants all received the ACTER dataset as described in the previous section, with all domains apart from heart failure. The latter is provided during the final phase as the test set on which the scores are calculated. As described in the previous section, ACTER 1.2 consists of flat lists of unique terms per corpus, with one term per line. Since this first version of the shared task aims to focus on ATE in general, rather than term variation, all terms are lowercased, and only identical lowercased terms are merged in a single entry, without lemmatisation. We do not discount the importance of ATE systems that handle term variation, but a choice was made to focus on the core task for the first edition of the task.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Setup",
"sec_num": "4.1."
},
{
"text": "There are three different tracks (one per language) and participants could enter in one or multiple tracks. When participants submitted their final results on the test data (as a flat list of unique lowercased terms, like the training data), f1-scores were calculated twice: once compared to the gold standard with only terms, once compared to the gold standard with both terms and Named Entities. These double scores did not influence the final ranking based on f1-scores. The dataset has been used for more detailed evaluations as well (see section 4.3) and participants were encouraged to report scores on the training domains in their own papers as well.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Setup",
"sec_num": "4.1."
},
{
"text": "Five teams participated in the shared task: TALN-LS2N (Hazem et al., 2020) , RACAI (Pais and Ion, 2020) , e-Terminology (Oliver and V\u00e0zquez, 2020) , NLPLab UQAM (no system description paper), and NYU (no system description paper but based on previous work in Meyers et al. (2018) ). NYU and RACAI participated only in the English track, TALN-LS2N participated in both the English and French tracks, and e-Terminology and NLPLab UQAM participated in all tracks. We refer to their own system description papers for more details, but will provide a short summary of each of their methodologies. Team NYU has applied an updated version of Termolator (Meyers et al., 2018) . Candidate terms are selected based on \"terminological chunking and abbreviations\". The terminological chunking focuses, among others, on nominalisations, out-of-vocabulary words, and technical adjectives (based on suffixes) to find terms. Constructions where full forms are followed by their abbreviations are also taken into account. Next, three distributional metrics (e.g. TFIDF) are combined with equal weights and a \"well-formedness score\" is calculated, using mainly linguistic and morphological information. Additionally, a relevance score is based on the results of an online search engine. The final selection of candidate terms is made based on the product of these three metrics. Due to the timing of the shared task, Termolator was not specifically tuned to the ACTER dataset.",
"cite_spans": [
{
"start": 54,
"end": 74,
"text": "(Hazem et al., 2020)",
"ref_id": "BIBREF13"
},
{
"start": 83,
"end": 103,
"text": "(Pais and Ion, 2020)",
"ref_id": "BIBREF26"
},
{
"start": 120,
"end": 146,
"text": "(Oliver and V\u00e0zquez, 2020)",
"ref_id": "BIBREF25"
},
{
"start": 259,
"end": 279,
"text": "Meyers et al. (2018)",
"ref_id": "BIBREF23"
},
{
"start": 646,
"end": 667,
"text": "(Meyers et al., 2018)",
"ref_id": "BIBREF23"
}
],
"ref_spans": [],
"eq_spans": [],
"section": "Participants",
"sec_num": "4.2."
},
{
"text": "Team e-Terminology uses the TSR (Token Slot Recognition) technique, implemented in TBXTools (Oliver and Vazquez, 2015; V\u00e0zquez and Oliver, 2018) . For Dutch, the statistical version of TBXTools is employed, for English and French the linguistic version is used. Stopwords are filtered out, as are all candidate terms that appear below a frequency threshold of two. As a terminological reference for each language (required for the TSR technique), the IATE database for 12-Law was chosen.",
"cite_spans": [
{
"start": 92,
"end": 118,
"text": "(Oliver and Vazquez, 2015;",
"ref_id": "BIBREF24"
},
{
"start": 119,
"end": 144,
"text": "V\u00e0zquez and Oliver, 2018)",
"ref_id": "BIBREF39"
}
],
"ref_spans": [],
"eq_spans": [],
"section": "Participants",
"sec_num": "4.2."
},
{
"text": "Team RACAI uses a combination of statistical approaches, such as an improved TextRank (Zhang et al., 2018) , TFIDF, clustering, and termhood features. Algorithms were adapted where possible to make use of pre-trained word embeddings and the result was generated using several voting and combinatorial approaches. Special attention is also paid to the detection of nested terms. Team TALN-LS2N uses BERT as a binary classification model for ATE. The model's input is represented as the concatenation of a sentence and a selected n-gram within the sentence. If the n-gram is a term, the input is labelled as a positive training example. If not, a corresponding negative example is generated. Team NLPLab UQAM applied a bidirectional LSTM. Pre-trained GloVe word embeddings were used to train a neural network-based model on the training corpora.",
"cite_spans": [
{
"start": 86,
"end": 106,
"text": "(Zhang et al., 2018)",
"ref_id": "BIBREF41"
}
],
"ref_spans": [],
"eq_spans": [],
"section": "Participants",
"sec_num": "4.2."
},
{
"text": "Precision, recall, and f1-scores were calculated both including and excluding Named Entities, for each team in all tracks. The scores and resulting ranking are presented in Table 3 . As can be seen, TALN-LS2N's system outperforms all others in the English and French tracks. NLPLab UQAM's system outperforms e-Terminology for the Dutch track (though their respective rankings for English and Dutch are reversed). Scores with and without Named Entities are usually very similar (average difference of one percentage point), with e-Terminology and NYU scoring slightly better when Named Entities are excluded, and the others scoring better when they are included. On average, precision is higher than recall, especially when Named Entities are included. However, there is much variation. For instance, TALN-LS2N's English system obtains 36-40% more recall than precision (the difference is only 6-9% for their French system). Comparatively, e-Terminology obtains 20% higher precision than recall on average and NLPLab UQAM obtains more balanced precision and recall scores. The number of extracted term candidates varies greatly as well, from 744 (e-Terminology in Dutch), to 5267 (TALN-LS2N in English). Therefore, even though TALN-LS2N achieves the highest f1-scores thanks to great recall in English, their system also produces most noise, with 3435 false positives (including Named Entities). The average number of extracted candidate terms 2038is not too different from the average number of terms in the gold standard (2422 incl. Named Entities, 1720 without). Looking at performance of systems in multiple tracks, there does not appear to be one language that is inherently easier or more difficult. TALN-LS2N's best performance is reached for French, e-Terminology's for English, and NLPLab UQAM's for Dutch. As with many other task within natural language processing, the methodology based on the BERT transformer model appears to outperform other approaches. 
However, the large gap between precision and recall for the English model, which is much smaller for the French model, may be an indication of an often-cited downside of deep learning models: their unpredictability. For ATE, predictability is cited as at least as important as f1-scores: \"for ATE to be usable, its results should be consistent, predictable and transparent\" (Kageura and Marshman, 2019) . Additionally, it appears that neural networks and word embeddings do not always work for this task, as demonstrated by the fact that, for English and French, NLPLab UQAM's bidirectional LSTM approach with GloVe embeddings is ranked last, below non-neural approaches such as NYU's.",
"cite_spans": [
{
"start": 2341,
"end": 2369,
"text": "(Kageura and Marshman, 2019)",
"ref_id": "BIBREF15"
}
],
"ref_spans": [
{
"start": 173,
"end": 180,
"text": "Table 3",
"ref_id": "TABREF2"
}
],
"eq_spans": [],
"section": "Results",
"sec_num": "4.3."
},
{
"text": "Apart from the ranking based on f1-scores, three different aspects of the results are analysed in more detail: composition of the output, recall of terms with different frequencies, and recall of terms with different lengths. Figure 1 shows the first of these, illustrating the composition of the gold standard regarding the four annotation labels, versus the true positives from each team. The results are averaged over all languages, as the differences between the languages were small. False positives were not included, since these can be deduced from the precision scores. The graphs are relative, so they do not represent the absolute number of annotations per type, only the proportions. The order of the teams is the order of their ranks for the English track. A first observation is that all teams seem to extract at least some Named Entities, except for e-Terminology. This may be partly due to their low recall, but since they did not extract a single Named Entity in any of the languages, it does appear that their system is most focused on terms. While the differences are never extreme, the various systems do show some variation in this respect. For instance, the two lowest ranked systems can be seen to extract relatively more Common Terms. This may be an indication that they are sensitive to frequency, as many of the Specific Terms are rarer (e.g., e-Terminology employs a frequency threshold of two). Conversely, NYU's system appears to excel at extracting these Specific Terms and also extracts relatively few Named Entities. The output of two top-scoring teams has a very similar composition to the gold standard, which Table 4 : Scores (as percentages) and rank for all teams per track Figure 1 : Proportion of Specific, Common, and OOD Terms, and Named Entities in the gold standard versus the true positives extracted by each team (averaged over all languages if teams participated in multiple tracks). 
may be part of the explanation for their high scores, and, in the case of TALN-LS2N's system, may be related to their reliance on the training data.",
"cite_spans": [],
"ref_spans": [
{
"start": 226,
"end": 234,
"text": "Figure 1",
"ref_id": null
},
{
"start": 1643,
"end": 1650,
"text": "Table 4",
"ref_id": null
},
{
"start": 1710,
"end": 1718,
"text": "Figure 1",
"ref_id": null
}
],
"eq_spans": [],
"section": "Results",
"sec_num": "4.3."
},
{
"text": "A preference for Common Terms or Specific Terms can already give an indication of the system performance for rare terms, but we can also look directly at the recall of terms for various frequencies, as shown in Figure 2 . Here, the recall of all systems for various term frequencies is shown for the English track. Results for the other languages were similar, so will not be discussed separately. The dataset actually contains many hapax terms (which appear only once). In English, when Named Entities are included, there are 1121 (43%) hapax terms, 398 (15%) terms that appear twice, 220 (9%) terms that appear three times, 232 (9%) terms with a frequency between 4 and 5, 259 (10%) terms with a frequency between 5 and 10, 199 (8%) terms with a frequency between 10 and 25, and only 156 (6%) terms that appear more than 25 times. In line with previous findings on the difficulties of ATE, recall is lowest for hapax terms for all systems, and increases as frequency increases. Of course, e-Terminology has 0% recall for hapax terms due to the frequency cut-off, but the other systems also have difficulties. Notably, TALN-LS2N's system obtains a surprisingly stable recall for various frequencies and a very high recall of 64% for hapax terms. This is likely a consequence of the fact that they use none of the traditional statistical (frequency-related) metrics for ATE. Recall is almost always highest for the most frequent terms, though when looking at these frequent terms in more detail, recall appears to drop again for the most extreme cases (terms appearing over 100 times; not represented separately in Figure 2 ), presumably because these are more difficult to distinguish from common general language words.",
"cite_spans": [],
"ref_spans": [
{
"start": 211,
"end": 219,
"text": "Figure 2",
"ref_id": "FIGREF0"
},
{
"start": 1615,
"end": 1624,
"text": "Figure 2",
"ref_id": "FIGREF0"
}
],
"eq_spans": [],
"section": "Results",
"sec_num": "4.3."
},
{
"text": "The final analysis concerns term length. Similarly to the analysis for frequency, Figure 3 presents recall for different term lengths per team, using the English data, including Named Entities, as a reference. The majority of gold standard terms are single-word terms (swts) (1170, or 45%), with frequencies decreasing as term length increases (800 or 31% 2-word terms (2wts), 376 or 15% 3wts, 144 or 6% 4wts, 40 or 2% 5wts, and 55 or 2% terms that are longer than 5 tokens. As can be seen in Figure 3 , two out of five teams (RACAI and NLPLab UQAM) have lower recall for 2wts than for swts, and, overall, recall decreases for terms with more than 3 tokens. TALN-LS2N extracts no terms beyond a length of 3 tokens at all, though this is different for their French system, where recall decreases more gradually with term length. NYU's system has a surprisingly stable performance for different term lengths, especially compared to TALN-LS2N and RACAI.",
"cite_spans": [],
"ref_spans": [
{
"start": 82,
"end": 90,
"text": "Figure 3",
"ref_id": null
},
{
"start": 493,
"end": 501,
"text": "Figure 3",
"ref_id": null
}
],
"eq_spans": [],
"section": "Results",
"sec_num": "4.3."
},
{
"text": "Five different teams submitted their results for the TermEval shared task on ATE, based on the ACTER dataset. With the domains of corruption, dressage, and wind energy from the dataset as training data or simply as reference material, the teams either used (and adapted) their existing systems or developed a new methodology for ATE. The domain of heart failure was used as the test set, with three different tracks for English, French and Dutch. The teams were all ranked based on the f1-score they obtained on the test data, with additional evaluations of the types of terms they extracted and recall for different term frequencies and term lengths.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Discussion and Conclusions",
"sec_num": "5."
},
{
"text": "The results show quite a large variation between all methodologies. The highest scores were obtained by a deep learning methodology using BERT as a binary classification model. The second best system does not rely on deep learning and combines pre-trained word embeddings with more classical features for ATE, such as statistical termhood measures. Such results show how there is still a lot of potential for deep learning techniques in the field of ATE, highlighting also the importance of large datasets like ACTER. However, it also illustrates that more traditional methodologies can still lead to state-of-the-art results as well, especially when updated with features like word embeddings. The more detailed analyses also revealed how the composition of the output of the different systems varies, e.g., including or excluding more Named Entities, and focusing on either the most domain-specific and specialised terms (Specific Terms) or also on more general terms (Common Terms). This is a clear indication of how different applications for ATE may require different methodologies. For instance, translators may be more interested in a system that extracts mostly Specific Terms, since Common Terms may already be part of their general vocabulary. Checking recall for terms with different frequencies and terms with different lengths confirmed two often-cited weaknesses of ATE: low-frequency terms and long terms are more difficult to extract. However, in each case, there were some systems for which the performance was more stable and less impacted by these factors. The winning deep learning approach achieves a high recall even for hapax terms (64%) and one of the rule-based systems maintains a more or less stable recall for terms up to a length of five tokens. 
With these results, we conclude that there remains a lot of room for improvement in the field of ATE, both by trying the latest deep learning methodologies which have been successfully used in other natural language processing tasks, and by updating and combining more traditional methodologies with state-of-the-art features and algorithms. Taking into account the unpredictability of many machine learning approaches and the considerable variety between the potential outputs, as demonstrated in this shared task, it is essential for ATE to be evaluated beyond precision, recall, and f1-scores. To further encourage and facilitate both supervised machine learning approaches and high-quality evaluations on diverse data, the complete ACTER dataset has been made freely available online (Rigouts Terryn, Ayla and Drouin, Patrick and Hoste, V\u00e9ronique and Lefever, Els, 2020).",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Discussion and Conclusions",
"sec_num": "5."
},
{
"text": "http://hdl.handle.net/1854/LU-8503113",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "",
"sec_num": null
}
],
"back_matter": [],
"bib_entries": {
"BIBREF0": {
"ref_id": "b0",
"title": "Local-Global Vectors to Improve Unigram Terminology Extraction",
"authors": [
{
"first": "E",
"middle": [],
"last": "Amjadian",
"suffix": ""
},
{
"first": "D",
"middle": [],
"last": "Inkpen",
"suffix": ""
},
{
"first": "T",
"middle": [],
"last": "Paribakht",
"suffix": ""
},
{
"first": "F",
"middle": [],
"last": "Faez",
"suffix": ""
}
],
"year": 2016,
"venue": "Proceedings of the 5th International Workshop on Computational Terminology",
"volume": "",
"issue": "",
"pages": "2--11",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Amjadian, E., Inkpen, D., Paribakht, T., and Faez, F. (2016). Local-Global Vectors to Improve Unigram Ter- minology Extraction. In Proceedings of the 5th Interna- tional Workshop on Computational Terminology, pages 2-11, Osaka, Japan.",
"links": null
},
"BIBREF1": {
"ref_id": "b1",
"title": "Preference Learning in Terminology Extraction: A ROCbased approach",
"authors": [
{
"first": "J",
"middle": [],
"last": "Az\u00e9",
"suffix": ""
},
{
"first": "M",
"middle": [],
"last": "Roche",
"suffix": ""
},
{
"first": "Y",
"middle": [],
"last": "Kodratoff",
"suffix": ""
},
{
"first": "M",
"middle": [],
"last": "Sebag",
"suffix": ""
}
],
"year": 2005,
"venue": "Proceeedings of Applied Stochastic Models and Data Analysis",
"volume": "",
"issue": "",
"pages": "209--2019",
"other_ids": {
"arXiv": [
"arXiv:cs/0512050"
]
},
"num": null,
"urls": [],
"raw_text": "Az\u00e9, J., Roche, M., Kodratoff, Y., and Sebag, M. (2005). Preference Learning in Terminology Extraction: A ROC- based approach. In Proceeedings of Applied Stochas- tic Models and Data Analysis, pages 209-2019, Brest, France. arXiv: cs/0512050.",
"links": null
},
"BIBREF2": {
"ref_id": "b2",
"title": "Concept annotation in the CRAFT corpus",
"authors": [
{
"first": "M",
"middle": [],
"last": "Bada",
"suffix": ""
},
{
"first": "M",
"middle": [],
"last": "Eckert",
"suffix": ""
},
{
"first": "D",
"middle": [],
"last": "Evans",
"suffix": ""
},
{
"first": "K",
"middle": [],
"last": "Garcia",
"suffix": ""
},
{
"first": "K",
"middle": [],
"last": "Shipley",
"suffix": ""
},
{
"first": "D",
"middle": [],
"last": "Sitnikov",
"suffix": ""
},
{
"first": "W",
"middle": [
"A"
],
"last": "Baumgartner",
"suffix": ""
},
{
"first": "K",
"middle": [
"B"
],
"last": "Cohen",
"suffix": ""
},
{
"first": "K",
"middle": [],
"last": "Verspoor",
"suffix": ""
},
{
"first": "J",
"middle": [
"A"
],
"last": "Blake",
"suffix": ""
},
{
"first": "L",
"middle": [
"E"
],
"last": "Hunter",
"suffix": ""
}
],
"year": 2012,
"venue": "BMC Bioinformatics",
"volume": "13",
"issue": "",
"pages": "161--180",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Bada, M., Eckert, M., Evans, D., Garcia, K., Shipley, K., Sitnikov, D., Baumgartner, W. A., Cohen, K. B., Ver- spoor, K., Blake, J. A., and Hunter, L. E. (2012). Con- cept annotation in the CRAFT corpus. BMC Bioinfor- matics, 13:161-180.",
"links": null
},
"BIBREF3": {
"ref_id": "b3",
"title": "Creating a test corpus for term extractors through term annotation",
"authors": [
{
"first": "G",
"middle": [],
"last": "Bernier-Colborne",
"suffix": ""
},
{
"first": "P",
"middle": [],
"last": "Drouin",
"suffix": ""
}
],
"year": 2014,
"venue": "Terminology",
"volume": "20",
"issue": "1",
"pages": "50--73",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Bernier-Colborne, G. and Drouin, P. (2014). Creating a test corpus for term extractors through term annotation. Terminology, 20(1):50-73.",
"links": null
},
"BIBREF4": {
"ref_id": "b4",
"title": "Defining a Gold Standard for the Evaluation of Term Extractors",
"authors": [
{
"first": "G",
"middle": [],
"last": "Bernier-Colborne",
"suffix": ""
}
],
"year": 2012,
"venue": "Proceedings of the 8th international conference on Language Resources and Evaluation (LREC)",
"volume": "",
"issue": "",
"pages": "",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Bernier-Colborne, G. (2012). Defining a Gold Standard for the Evaluation of Term Extractors. In Proceedings of the 8th international conference on Language Resources and Evaluation (LREC), Istanbul, Turkey. ELRA.",
"links": null
},
"BIBREF5": {
"ref_id": "b5",
"title": "Annotation s\u00e9mantique et validation terminologique en texte int\u00e9gral en SHS",
"authors": [
{
"first": "M",
"middle": [],
"last": "Billami",
"suffix": ""
},
{
"first": "J",
"middle": [],
"last": "Camacho-Collados",
"suffix": ""
},
{
"first": "E",
"middle": [],
"last": "Jacquey",
"suffix": ""
},
{
"first": "L",
"middle": [],
"last": "Kister",
"suffix": ""
}
],
"year": 2014,
"venue": "Proceedings of TALN 2014",
"volume": "",
"issue": "",
"pages": "363--376",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Billami, M., Camacho-Collados, J., Jacquey, E., and Kister, L. (2014). Annotation s\u00e9mantique et validation termi- nologique en texte int\u00e9gral en SHS. In Proceedings of TALN 2014, pages 363-376, Marseille, France.",
"links": null
},
"BIBREF6": {
"ref_id": "b6",
"title": "Topic Models Can Improve Domain Term Extraction",
"authors": [
{
"first": "E",
"middle": [],
"last": "Bolshakova",
"suffix": ""
},
{
"first": "N",
"middle": [],
"last": "Loukachevitch",
"suffix": ""
},
{
"first": "M",
"middle": [],
"last": "Nokel",
"suffix": ""
}
],
"year": 2013,
"venue": "Advances in Information Retrieval",
"volume": "7814",
"issue": "",
"pages": "684--687",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Bolshakova, E., Loukachevitch, N., and Nokel, M. (2013). Topic Models Can Improve Domain Term Extraction. In David Hutchison, et al., editors, Advances in Information Retrieval, volume 7814, pages 684-687. Springer Berlin Heidelberg, Berlin, Heidelberg.",
"links": null
},
"BIBREF7": {
"ref_id": "b7",
"title": "Term Extraction Using Non-Technical Corpora as a Point of Leverage",
"authors": [
{
"first": "P",
"middle": [],
"last": "Drouin",
"suffix": ""
}
],
"year": 2003,
"venue": "Terminology",
"volume": "9",
"issue": "1",
"pages": "99--115",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Drouin, P. (2003). Term Extraction Using Non-Technical Corpora as a Point of Leverage. Terminology, 9(1):99- 115.",
"links": null
},
"BIBREF8": {
"ref_id": "b8",
"title": "Les unit\u00e9s de signification sp\u00e9cialis\u00e9e\u015b elargissant l'objet du travail en terminologie",
"authors": [
{
"first": "R",
"middle": [],
"last": "Estop\u00e0",
"suffix": ""
}
],
"year": 2001,
"venue": "Terminology",
"volume": "7",
"issue": "2",
"pages": "217--237",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Estop\u00e0, R. (2001). Les unit\u00e9s de signification sp\u00e9cialis\u00e9e\u015b elargissant l'objet du travail en terminologie. Terminol- ogy, 7(2):217-237.",
"links": null
},
"BIBREF9": {
"ref_id": "b9",
"title": "Automatic recognition of domain-specific terms: an experimental evaluation",
"authors": [
{
"first": "D",
"middle": [],
"last": "Fedorenko",
"suffix": ""
},
{
"first": "N",
"middle": [],
"last": "Astrakhantsev",
"suffix": ""
},
{
"first": "D",
"middle": [],
"last": "Turdakov",
"suffix": ""
},
{
"first": "Russia",
"middle": [],
"last": "Kazan",
"suffix": ""
},
{
"first": "J",
"middle": [],
"last": "Foo",
"suffix": ""
},
{
"first": "M",
"middle": [],
"last": "Merkel",
"suffix": ""
}
],
"year": 2010,
"venue": "Proceedings of the LREC 2010 Workshop on Methods for automatic acquisition of Language Resources and their evaluation methods",
"volume": "26",
"issue": "",
"pages": "49--54",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Fedorenko, D., Astrakhantsev, N., and Turdakov, D. (2013). Automatic recognition of domain-specific terms: an experimental evaluation. In Proceedings of the Ninth Spring Researcher's Colloquium on Database and Infor- mation Systems, volume 26, pages 15-23, Kazan, Russia. Foo, J. and Merkel, M. (2010). Using machine learning to perform automatic term recognition. In Proceedings of the LREC 2010 Workshop on Methods for automatic acquisition of Language Resources and their evaluation methods, pages 49-54, Valetta, Malta. ELRA.",
"links": null
},
"BIBREF10": {
"ref_id": "b10",
"title": "Feature-Less End-to-End Nested Term Extraction",
"authors": [
{
"first": "Y",
"middle": [],
"last": "Gao",
"suffix": ""
},
{
"first": "Y",
"middle": [],
"last": "Yuan",
"suffix": ""
}
],
"year": 2019,
"venue": "",
"volume": "",
"issue": "",
"pages": "",
"other_ids": {
"arXiv": [
"arXiv:1908.05426"
]
},
"num": null,
"urls": [],
"raw_text": "Gao, Y. and Yuan, Y. (2019). Feature-Less End-to-End Nested Term Extraction. arXiv:1908.05426 [cs, stat], August. arXiv: 1908.05426.",
"links": null
},
"BIBREF11": {
"ref_id": "b11",
"title": "Fine-Grained Termhood Prediction for German Compound Terms Using Neural Networks",
"authors": [
{
"first": "A",
"middle": [],
"last": "H\u00e4tty",
"suffix": ""
},
{
"first": "S",
"middle": [],
"last": "Schulte Im Walde",
"suffix": ""
}
],
"year": 2018,
"venue": "Proceedings of the Joint Workshop on,Linguistic Annotation, Multiword Expressions and Constructions",
"volume": "",
"issue": "",
"pages": "62--73",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "H\u00e4tty, A. and Schulte im Walde, S. (2018a). Fine-Grained Termhood Prediction for German Compound Terms Us- ing Neural Networks. In Proceedings of the Joint Work- shop on,Linguistic Annotation, Multiword Expressions and Constructions (LAW-MWE-CxG-2018), pages 62- 73, Sante Fe, New Mexico, USA.",
"links": null
},
"BIBREF12": {
"ref_id": "b12",
"title": "A Laypeople Study on Terminology Identification across Domains and Task Definitions",
"authors": [
{
"first": "A",
"middle": [],
"last": "H\u00e4tty",
"suffix": ""
},
{
"first": "S",
"middle": [],
"last": "Schulte Im Walde",
"suffix": ""
}
],
"year": 2018,
"venue": "Proceedings of NAACL-HLT 2018",
"volume": "",
"issue": "",
"pages": "321--326",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "H\u00e4tty, A. and Schulte im Walde, S. (2018b). A Laypeople Study on Terminology Identification across Domains and Task Definitions. In Proceedings of NAACL-HLT 2018, pages 321-326, New Orleans, USA. ACL.",
"links": null
},
"BIBREF13": {
"ref_id": "b13",
"title": "Termeval 2020: Taln-ls2n system for automatic term extraction",
"authors": [
{
"first": "A",
"middle": [],
"last": "Hazem",
"suffix": ""
},
{
"first": "M",
"middle": [],
"last": "Bouhandi",
"suffix": ""
},
{
"first": "F",
"middle": [],
"last": "Boudin",
"suffix": ""
},
{
"first": "B",
"middle": [],
"last": "Daille",
"suffix": ""
}
],
"year": 2020,
"venue": "Proceedings of CompuTerm",
"volume": "",
"issue": "",
"pages": "",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Hazem, A., Bouhandi, M., Boudin, F., and Daille, B. (2020). Termeval 2020: Taln-ls2n system for automatic term extraction. In Proceedings of CompuTerm 2020.",
"links": null
},
"BIBREF14": {
"ref_id": "b14",
"title": "Unsupervised training set generation for automatic acquisition of technical terminology in patents",
"authors": [
{
"first": "A",
"middle": [],
"last": "Judea",
"suffix": ""
},
{
"first": "H",
"middle": [],
"last": "Sch\u00fctze",
"suffix": ""
},
{
"first": "S",
"middle": [],
"last": "Br\u00fcgmann",
"suffix": ""
}
],
"year": 2014,
"venue": "Proceedings of COLING 2014, the 25th international conference on computational linguistics: Technical Papers",
"volume": "",
"issue": "",
"pages": "290--300",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Judea, A., Sch\u00fctze, H., and Br\u00fcgmann, S. (2014). Unsu- pervised training set generation for automatic acquisi- tion of technical terminology in patents. In Proceedings of COLING 2014, the 25th international conference on computational linguistics: Technical Papers, pages 290- 300, Dublin, Ireland.",
"links": null
},
"BIBREF15": {
"ref_id": "b15",
"title": "Terminology Extraction and Management",
"authors": [
{
"first": "K",
"middle": [],
"last": "Kageura",
"suffix": ""
},
{
"first": "E",
"middle": [],
"last": "Marshman",
"suffix": ""
}
],
"year": 2019,
"venue": "The Routledge Handbook of Translation and Technology",
"volume": "",
"issue": "",
"pages": "",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Kageura, K. and Marshman, E. (2019). Terminology Ex- traction and Management. In O'Hagan, Minako, editor, The Routledge Handbook of Translation and Technology.",
"links": null
},
"BIBREF16": {
"ref_id": "b16",
"title": "Methods of automatic term recognition",
"authors": [
{
"first": "K",
"middle": [],
"last": "Kageura",
"suffix": ""
},
{
"first": "B",
"middle": [],
"last": "Umino",
"suffix": ""
}
],
"year": 1996,
"venue": "Terminology",
"volume": "3",
"issue": "2",
"pages": "259--289",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Kageura, K. and Umino, B. (1996). Methods of automatic term recognition. Terminology, 3(2):259-289.",
"links": null
},
"BIBREF17": {
"ref_id": "b17",
"title": "Extraction of terminology in the field of construction",
"authors": [
{
"first": "R",
"middle": [],
"last": "Kessler",
"suffix": ""
},
{
"first": "N",
"middle": [],
"last": "B\u00e9chet",
"suffix": ""
},
{
"first": "G",
"middle": [],
"last": "Berio",
"suffix": ""
}
],
"year": 2019,
"venue": "Proceedings of the First International Conference on Digital Data Processing (DDP)",
"volume": "",
"issue": "",
"pages": "22--26",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Kessler, R., B\u00e9chet, N., and Berio, G. (2019). Extraction of terminology in the field of construction. In Proceedings of the First International Conference on Digital Data Processing (DDP), pages 22-26, London, UK. IEEE.",
"links": null
},
"BIBREF18": {
"ref_id": "b18",
"title": "GE-NIA corpus -a semantically annotated corpus for biotextmining",
"authors": [
{
"first": "J.-D",
"middle": [],
"last": "Kim",
"suffix": ""
},
{
"first": "T",
"middle": [],
"last": "Ohta",
"suffix": ""
},
{
"first": "Y",
"middle": [],
"last": "Tateisi",
"suffix": ""
},
{
"first": "J",
"middle": [],
"last": "Tsujii",
"suffix": ""
}
],
"year": 2003,
"venue": "Bioinformatics",
"volume": "19",
"issue": "1",
"pages": "180--182",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Kim, J.-D., Ohta, T., Tateisi, Y., and Tsujii, J. (2003). GE- NIA corpus -a semantically annotated corpus for bio- textmining. Bioinformatics, 19(1):180-182.",
"links": null
},
"BIBREF19": {
"ref_id": "b19",
"title": "Optimized Term Extraction Method Based on Computing Merged Partial C-Values",
"authors": [
{
"first": "V",
"middle": [],
"last": "Kosa",
"suffix": ""
},
{
"first": "D",
"middle": [],
"last": "Chaves-Fraga",
"suffix": ""
},
{
"first": "H",
"middle": [],
"last": "Dobrovolskyi",
"suffix": ""
},
{
"first": "V",
"middle": [],
"last": "Ermolayev",
"suffix": ""
}
],
"year": 2020,
"venue": "Information and Communication Technologies in Education, Research, and Industrial Applications. ICTERI 2019",
"volume": "1175",
"issue": "",
"pages": "24--49",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Kosa, V., Chaves-Fraga, D., Dobrovolskyi, H., and Ermo- layev, V. (2020). Optimized Term Extraction Method Based on Computing Merged Partial C-Values. In Infor- mation and Communication Technologies in Education, Research, and Industrial Applications. ICTERI 2019, volume 1175 of Communications in Computer and IN- formation Science, pages 24-49. Springer International Publishing, Cham.",
"links": null
},
"BIBREF20": {
"ref_id": "b20",
"title": "Term Extraction via Neural Sequence Labeling a Comparative Evaluation of Strategies Using Recurrent Neural Networks",
"authors": [
{
"first": "M",
"middle": [],
"last": "Kucza",
"suffix": ""
},
{
"first": "J",
"middle": [],
"last": "Niehues",
"suffix": ""
},
{
"first": "T",
"middle": [],
"last": "Zenkel",
"suffix": ""
},
{
"first": "A",
"middle": [],
"last": "Waibel",
"suffix": ""
},
{
"first": "S",
"middle": [],
"last": "St\u00fcker",
"suffix": ""
}
],
"year": 2018,
"venue": "",
"volume": "",
"issue": "",
"pages": "2072--2076",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Kucza, M., Niehues, J., Zenkel, T., Waibel, A., and St\u00fcker, S. (2018). Term Extraction via Neural Sequence Label- ing a Comparative Evaluation of Strategies Using Recur- rent Neural Networks. In Interspeech 2018, pages 2072- 2076, Hyderabad, India, September. ISCA.",
"links": null
},
"BIBREF21": {
"ref_id": "b21",
"title": "Reference Lists for the Evaluation of Term Extraction Tools",
"authors": [
{
"first": "E",
"middle": [],
"last": "Loginova",
"suffix": ""
},
{
"first": "A",
"middle": [],
"last": "Gojun",
"suffix": ""
},
{
"first": "H",
"middle": [],
"last": "Blancafort",
"suffix": ""
},
{
"first": "M",
"middle": [],
"last": "Gu\u00e9gan",
"suffix": ""
},
{
"first": "T",
"middle": [],
"last": "Gornostay",
"suffix": ""
},
{
"first": "U",
"middle": [],
"last": "Heid",
"suffix": ""
}
],
"year": 2012,
"venue": "Proceedings of the 10th International Congress on Terminology and Knowledge Engineering",
"volume": "",
"issue": "",
"pages": "2401--2407",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Loginova, E., Gojun, A., Blancafort, H., Gu\u00e9gan, M., Gornostay, T., and Heid, U. (2012). Reference Lists for the Evaluation of Term Extraction Tools. In Proceedings of the 10th International Congress on Terminology and Knowledge Engineering, Madrid, Spain. ACL. Loukachevitch, N. (2012). Automatic Term Recognition Needs Multiple Evidence. In Proceedings of LREC 2012, pages 2401-2407, Istanbul, Turkey. ELRA.",
"links": null
},
"BIBREF22": {
"ref_id": "b22",
"title": "TExSIS: Bilingual Terminology Extraction from Parallel Corpora Using Chunk-based Alignment",
"authors": [
{
"first": "L",
"middle": [],
"last": "Macken",
"suffix": ""
},
{
"first": "E",
"middle": [],
"last": "Lefever",
"suffix": ""
},
{
"first": "V",
"middle": [],
"last": "Hoste",
"suffix": ""
}
],
"year": 2013,
"venue": "Terminology",
"volume": "19",
"issue": "1",
"pages": "1--30",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Macken, L., Lefever, E., and Hoste, V. (2013). TExSIS: Bilingual Terminology Extraction from Parallel Corpora Using Chunk-based Alignment. Terminology, 19(1):1- 30.",
"links": null
},
"BIBREF23": {
"ref_id": "b23",
"title": "The Termolator: Terminology Recognition Based on Chunking, Statistical and Search-Based Scores. Frontiers in Research Metrics and Analytics",
"authors": [
{
"first": "A",
"middle": [
"L"
],
"last": "Meyers",
"suffix": ""
},
{
"first": "Y",
"middle": [],
"last": "He",
"suffix": ""
},
{
"first": "Z",
"middle": [],
"last": "Glass",
"suffix": ""
},
{
"first": "J",
"middle": [],
"last": "Ortega",
"suffix": ""
},
{
"first": "S",
"middle": [],
"last": "Liao",
"suffix": ""
},
{
"first": "A",
"middle": [],
"last": "Grieve-Smith",
"suffix": ""
},
{
"first": "R",
"middle": [],
"last": "Grishman",
"suffix": ""
},
{
"first": "O",
"middle": [],
"last": "Babko-Malaya",
"suffix": ""
}
],
"year": 2018,
"venue": "",
"volume": "",
"issue": "",
"pages": "",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Meyers, A. L., He, Y., Glass, Z., Ortega, J., Liao, S., Grieve-Smith, A., Grishman, R., and Babko-Malaya, O. (2018). The Termolator: Terminology Recogni- tion Based on Chunking, Statistical and Search-Based Scores. Frontiers in Research Metrics and Analytics, 3.",
"links": null
},
"BIBREF24": {
"ref_id": "b24",
"title": "TBXTools: A Free, Fast and Flexible Tool for Automatic Terminology Extraction",
"authors": [
{
"first": "A",
"middle": [],
"last": "Oliver",
"suffix": ""
},
{
"first": "M",
"middle": [],
"last": "Vazquez",
"suffix": ""
}
],
"year": 2015,
"venue": "Proceedings of Recent Advances in Natural Language Processing",
"volume": "",
"issue": "",
"pages": "473--479",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Oliver, A. and Vazquez, M. (2015). TBXTools: A Free, Fast and Flexible Tool for Automatic Terminology Ex- traction. In Proceedings of Recent Advances in Natural Language Processing, pages 473-479, Hissar, Bulgaria.",
"links": null
},
"BIBREF25": {
"ref_id": "b25",
"title": "Termeval 2020: Using tsr filtering method to improve automatic term extraction",
"authors": [
{
"first": "A",
"middle": [],
"last": "Oliver",
"suffix": ""
},
{
"first": "M",
"middle": [],
"last": "V\u00e0zquez",
"suffix": ""
}
],
"year": 2020,
"venue": "Proceedings of CompuTerm",
"volume": "",
"issue": "",
"pages": "",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Oliver, A. and V\u00e0zquez, M. (2020). Termeval 2020: Using tsr filtering method to improve automatic term extrac- tion. In Proceedings of CompuTerm 2020.",
"links": null
},
"BIBREF26": {
"ref_id": "b26",
"title": "Termeval 2020: Racai's automatic term extraction system",
"authors": [
{
"first": "V",
"middle": [],
"last": "Pais",
"suffix": ""
},
{
"first": "R",
"middle": [],
"last": "Ion",
"suffix": ""
}
],
"year": 2020,
"venue": "Proceedings of Com-puTerm 2020",
"volume": "",
"issue": "",
"pages": "",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Pais, V. and Ion, R. (2020). Termeval 2020: Racai's au- tomatic term extraction system. In Proceedings of Com- puTerm 2020.",
"links": null
},
"BIBREF27": {
"ref_id": "b27",
"title": "Karst Exploration: Extracting Terms and Definitions from Karst Domain Corpus",
"authors": [
{
"first": "S",
"middle": [],
"last": "Pollak",
"suffix": ""
},
{
"first": "A",
"middle": [],
"last": "Repar",
"suffix": ""
},
{
"first": "M",
"middle": [],
"last": "Martinc",
"suffix": ""
},
{
"first": "V",
"middle": [],
"last": "Podpe\u010dan",
"suffix": ""
}
],
"year": 2019,
"venue": "Proceedings of eLex 2019",
"volume": "",
"issue": "",
"pages": "934--956",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Pollak, S., Repar, A., Martinc, M., and Podpe\u010dan, V. (2019). Karst Exploration: Extracting Terms and Def- initions from Karst Domain Corpus. In Proceedings of eLex 2019, pages 934-956, Sintra, Portugal.",
"links": null
},
"BIBREF28": {
"ref_id": "b28",
"title": "Investigating Context Parameters in Technology Term Recognition",
"authors": [
{
"first": "B",
"middle": [],
"last": "Qasemizadeh",
"suffix": ""
},
{
"first": "S",
"middle": [],
"last": "Handschuh",
"suffix": ""
}
],
"year": 2014,
"venue": "Proceedings of SADAATL 2014",
"volume": "",
"issue": "",
"pages": "1--10",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Qasemizadeh, B. and Handschuh, S. (2014). Investigating Context Parameters in Technology Term Recognition. In Proceedings of SADAATL 2014, pages 1-10, Dublin, Ire- land.",
"links": null
},
"BIBREF29": {
"ref_id": "b29",
"title": "The ACL RD-TEC 2.0: A Language Resource for Evaluating Term Extraction and Entity Recognition Methods",
"authors": [
{
"first": "B",
"middle": [],
"last": "Qasemizadeh",
"suffix": ""
},
{
"first": "A.-K",
"middle": [],
"last": "Schumann",
"suffix": ""
}
],
"year": 2016,
"venue": "Proceedings of LREC 2016",
"volume": "",
"issue": "",
"pages": "1862--1868",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Qasemizadeh, B. and Schumann, A.-K. (2016). The ACL RD-TEC 2.0: A Language Resource for Evaluating Term Extraction and Entity Recognition Methods. In Proceed- ings of LREC 2016, pages 1862-1868, Portoro\u017e, Slove- nia. ELRA.",
"links": null
},
"BIBREF31": {
"ref_id": "b31",
"title": "Multiword Expressions in the wild? The mwetoolkit comes in handy",
"authors": [],
"year": null,
"venue": "Coling 2010: Demonstration Volume",
"volume": "",
"issue": "",
"pages": "57--60",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Multiword Expressions in the wild? The mwetoolkit comes in handy. In Coling 2010: Demonstration Vol- ume, pages 57-60, Beijing, China.",
"links": null
},
"BIBREF32": {
"ref_id": "b32",
"title": "Analysing the Impact of Supervised Machine Learning on Automatic Term Extraction: HAMLET vs TermoStat",
"authors": [
{
"first": "A",
"middle": [],
"last": "Rigouts Terryn",
"suffix": ""
},
{
"first": "P",
"middle": [],
"last": "Drouin",
"suffix": ""
},
{
"first": "V",
"middle": [],
"last": "Hoste",
"suffix": ""
},
{
"first": "E",
"middle": [],
"last": "Lefever",
"suffix": ""
}
],
"year": 2019,
"venue": "Proceedings of RANLP 2019",
"volume": "",
"issue": "",
"pages": "",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Rigouts Terryn, A., Drouin, P., Hoste, V., and Lefever, E. (2019a). Analysing the Impact of Supervised Machine Learning on Automatic Term Extraction: HAMLET vs TermoStat. In Proceedings of RANLP 2019, Varna, Bul- garia.",
"links": null
},
"BIBREF33": {
"ref_id": "b33",
"title": "In No Uncertain Terms: A Dataset for Monolingual and Multilingual Automatic Term Extraction from Comparable Corpora",
"authors": [
{
"first": "A",
"middle": [],
"last": "Rigouts Terryn",
"suffix": ""
},
{
"first": "V",
"middle": [],
"last": "Hoste",
"suffix": ""
},
{
"first": "E",
"middle": [],
"last": "Lefever",
"suffix": ""
}
],
"year": 2019,
"venue": "Language Resources and Evaluation",
"volume": "",
"issue": "",
"pages": "1--34",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Rigouts Terryn, A., Hoste, V., and Lefever, E. (2019b). In No Uncertain Terms: A Dataset for Monolingual and Multilingual Automatic Term Extraction from Compara- ble Corpora. Language Resources and Evaluation, pages 1-34.",
"links": null
},
"BIBREF34": {
"ref_id": "b34",
"title": "Compasses, Magnets, Water Microscopes",
"authors": [
{
"first": "A.-K",
"middle": [],
"last": "Schumann",
"suffix": ""
},
{
"first": "S",
"middle": [],
"last": "Fischer",
"suffix": ""
}
],
"year": 2016,
"venue": "Proceedings of LREC 2016",
"volume": "",
"issue": "",
"pages": "3578--3584",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Schumann, A.-K. and Fischer, S. (2016). Compasses, Magnets, Water Microscopes. In Proceedings of LREC 2016, pages 3578-3584, Portoro\u017e, Slovenia. ELRA.",
"links": null
},
"BIBREF35": {
"ref_id": "b35",
"title": "Similarity Driven Unsupervised Learning for Materials Science Terminology Extraction",
"authors": [
{
"first": "S",
"middle": [],
"last": "Shah",
"suffix": ""
},
{
"first": "S",
"middle": [],
"last": "Sarath",
"suffix": ""
},
{
"first": "R",
"middle": [],
"last": "Shreedhar",
"suffix": ""
}
],
"year": 2019,
"venue": "Computaci\u00f3n y Sistemas",
"volume": "23",
"issue": "3",
"pages": "1005--1013",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Shah, S., Sarath, S., and Shreedhar, R. (2019). Simi- larity Driven Unsupervised Learning for Materials Sci- ence Terminology Extraction. Computaci\u00f3n y Sistemas, 23(3):1005-1013.",
"links": null
},
"BIBREF36": {
"ref_id": "b36",
"title": "BioNLP Shared Task 2011: Supporting Resources",
"authors": [
{
"first": "P",
"middle": [],
"last": "Stenetorp",
"suffix": ""
},
{
"first": "G",
"middle": [],
"last": "Topi\u0107",
"suffix": ""
},
{
"first": "S",
"middle": [],
"last": "Pyysalo",
"suffix": ""
},
{
"first": "T",
"middle": [],
"last": "Ohta",
"suffix": ""
},
{
"first": "J.-D",
"middle": [],
"last": "Kim",
"suffix": ""
},
{
"first": "J",
"middle": [],
"last": "Tsujii",
"suffix": ""
}
],
"year": 2011,
"venue": "Proceedings of BioNLP Shared Task",
"volume": "",
"issue": "",
"pages": "",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Stenetorp, P., Topi\u0107, G., Pyysalo, S., Ohta, T., Kim, J.-D., and Tsujii, J. (2011). BioNLP Shared Task 2011: Sup- porting Resources. In Proceedings of BioNLP Shared Task 2011 Workshop.",
"links": null
},
"BIBREF37": {
"ref_id": "b37",
"title": "Annotation s\u00e9mantique et terminologique avec la plateforme SMARTIES",
"authors": [
{
"first": "P",
"middle": [],
"last": "Termith",
"suffix": ""
}
],
"year": 2014,
"venue": "",
"volume": "",
"issue": "",
"pages": "",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "TermITH, P. (2014). Annotation s\u00e9mantique et termi- nologique avec la plateforme SMARTIES.",
"links": null
},
"BIBREF38": {
"ref_id": "b38",
"title": "Improving term extraction by combining different techniques",
"authors": [
{
"first": "J",
"middle": [],
"last": "Vivaldi",
"suffix": ""
},
{
"first": "H",
"middle": [],
"last": "Rodr\u00edguez",
"suffix": ""
}
],
"year": 2001,
"venue": "Terminology",
"volume": "7",
"issue": "1",
"pages": "31--48",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Vivaldi, J. and Rodr\u00edguez, H. (2001). Improving term ex- traction by combining different techniques. Terminol- ogy, 7(1):31-48, December.",
"links": null
},
"BIBREF39": {
"ref_id": "b39",
"title": "Improving term candidates selection using terminological tokens",
"authors": [
{
"first": "M",
"middle": [],
"last": "V\u00e0zquez",
"suffix": ""
},
{
"first": "A",
"middle": [],
"last": "Oliver",
"suffix": ""
}
],
"year": 2018,
"venue": "Terminology",
"volume": "24",
"issue": "1",
"pages": "122--147",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "V\u00e0zquez, M. and Oliver, A. (2018). Improving term can- didates selection using terminological tokens. Terminol- ogy, 24(1):122-147, May.",
"links": null
},
"BIBREF40": {
"ref_id": "b40",
"title": "Featureless Domain-Specic Term Extraction with Minimal Labelled Data",
"authors": [
{
"first": "R",
"middle": [],
"last": "Wang",
"suffix": ""
},
{
"first": "W",
"middle": [],
"last": "Liu",
"suffix": ""
},
{
"first": "C",
"middle": [],
"last": "Mcdonald",
"suffix": ""
}
],
"year": 2016,
"venue": "Proceedings of Australasian Language Technology Association Workshop",
"volume": "",
"issue": "",
"pages": "103--112",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Wang, R., Liu, W., and McDonald, C. (2016). Featureless Domain-Specic Term Extraction with Minimal Labelled Data. In Proceedings of Australasian Language Technol- ogy Association Workshop, pages 103-112, Melbourne, Australia.",
"links": null
},
"BIBREF41": {
"ref_id": "b41",
"title": "Adapted TextRank for Term Extraction: A Generic Method of Improving Automatic Term Extraction Algorithms",
"authors": [
{
"first": "Z",
"middle": [],
"last": "Zhang",
"suffix": ""
},
{
"first": "J",
"middle": [],
"last": "Petrak",
"suffix": ""
},
{
"first": "Maynard",
"middle": [],
"last": "",
"suffix": ""
},
{
"first": "D",
"middle": [],
"last": "",
"suffix": ""
}
],
"year": 2018,
"venue": "ACM Transactions on Knowledge Discovery from Data",
"volume": "12",
"issue": "5",
"pages": "1--7",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Zhang, Z., Petrak, J., and Maynard, D. (2018). Adapted TextRank for Term Extraction: A Generic Method of Improving Automatic Term Extraction Algorithms. ACM Transactions on Knowledge Discovery from Data, 12(5):1-7.",
"links": null
},
"BIBREF42": {
"ref_id": "b42",
"title": "Evaluating Automatic Term Extraction Methods on Individual Documents",
"authors": [
{
"first": "A",
"middle": [],
"last": "Sajatovi\u0107",
"suffix": ""
},
{
"first": "M",
"middle": [],
"last": "Buljan",
"suffix": ""
},
{
"first": "J",
"middle": [],
"last": "\u0160najder",
"suffix": ""
},
{
"first": "B",
"middle": [
"D"
],
"last": "Ba\u0161i\u0107",
"suffix": ""
}
],
"year": 2019,
"venue": "Proceedings of the Joint Workshop on Multiword Expressions and WordNet (MWE-WN 2019)",
"volume": "",
"issue": "",
"pages": "149--154",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Sajatovi\u0107, A., Buljan, M.,\u0160najder, J., and Ba\u0161i\u0107, B. D. (2019). Evaluating Automatic Term Extraction Meth- ods on Individual Documents. In Proceedings of the Joint Workshop on Multiword Expressions and WordNet (MWE-WN 2019), pages 149-154, Florence, Italy. ACL. 7. Language Resource References",
"links": null
},
"BIBREF43": {
"ref_id": "b43",
"title": "Annotated Corpora for Term Extraction Research (ACTER)",
"authors": [
{
"first": "Rigouts",
"middle": [],
"last": "Terryn",
"suffix": ""
},
{
"first": "",
"middle": [],
"last": "Ayla",
"suffix": ""
},
{
"first": "Patrick",
"middle": [],
"last": "Drouin",
"suffix": ""
},
{
"first": "",
"middle": [],
"last": "Hoste",
"suffix": ""
},
{
"first": "",
"middle": [],
"last": "V\u00e9ronique",
"suffix": ""
},
{
"first": "Els",
"middle": [],
"last": "Lefever",
"suffix": ""
}
],
"year": 2020,
"venue": "Ghent University",
"volume": "1",
"issue": "2",
"pages": "",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Rigouts Terryn, Ayla and Drouin, Patrick and Hoste, V\u00e9ronique and Lefever, Els. (2020). Annotated Corpora for Term Extraction Research (ACTER). Ghent Univer- sity, 1.2.",
"links": null
}
},
"ref_entries": {
"FIGREF0": {
"text": "Recall for terms with various frequencies per team in English, including Named Entities Figure 3: Recall per term length (single-word terms (swts) to terms with over 5 tokens (5+wts) for each team in English, including Named Entities",
"type_str": "figure",
"uris": null,
"num": null
},
"TABREF1": {
"num": null,
"content": "<table><tr><td># Annotations</td></tr></table>",
"text": "Number of documents and words in the entire corpus vs. the annotated part of each corpus in ACTER 1.2",
"html": null,
"type_str": "table"
},
"TABREF2": {
"num": null,
"content": "<table/>",
"text": "Number of annotations (counting all annotations separately or all unique annotations) of terms and Named Entities (NEs), per corpus in ACTER 1.2 a different meaning through different capitalisation options or POS patterns, they only count as a single annotation in this version. For example, the English corpus on dressage contains the term bent (verb -past tense of to bend), but also Bent (proper noun -person name). While both capitalisation and POS differ, and bent is not the lemmatised form, there is only one entry: bent (lowercased) in the gold standard (other full forms of the verb to bend have separate entries, if they are present and annotated in the corpus)",
"html": null,
"type_str": "table"
}
}
}
} |