{
"paper_id": "P12-1003",
"header": {
"generated_with": "S2ORC 1.0.0",
"date_generated": "2023-01-19T09:27:05.259694Z"
},
"title": "Prediction of Learning Curves in Machine Translation",
"authors": [
{
"first": "Prasanth",
"middle": [],
"last": "Kolachina",
"suffix": "",
"affiliation": {
"laboratory": "",
"institution": "IIIT-Hyderabad",
"location": {
"settlement": "Hyderabad",
"country": "India"
}
},
"email": ""
},
{
"first": "Nicola",
"middle": [],
"last": "Cancedda",
"suffix": "",
"affiliation": {
"laboratory": "Xerox Research Centre Europe",
"institution": "",
"location": {
"addrLine": "6 chemin de Maupertuis",
"postCode": "38240",
"settlement": "Meylan",
"country": "France"
}
},
"email": ""
},
{
"first": "Marc",
"middle": [],
"last": "Dymetman",
"suffix": "",
"affiliation": {
"laboratory": "Xerox Research Centre Europe",
"institution": "",
"location": {
"addrLine": "6 chemin de Maupertuis",
"postCode": "38240",
"settlement": "Meylan",
"country": "France"
}
},
"email": ""
},
{
"first": "Sriram",
"middle": [],
"last": "Venkatapathy",
"suffix": "",
"affiliation": {
"laboratory": "Xerox Research Centre Europe",
"institution": "",
"location": {
"addrLine": "6 chemin de Maupertuis",
"postCode": "38240",
"settlement": "Meylan",
"country": "France"
}
},
"email": ""
}
],
"year": "",
"venue": null,
"identifiers": {},
"abstract": "Parallel data in the domain of interest is the key resource when training a statistical machine translation (SMT) system for a specific purpose. Since ad-hoc manual translation can represent a significant investment in time and money, a prior assesment of the amount of training data required to achieve a satisfactory accuracy level can be very useful. In this work, we show how to predict what the learning curve would look like if we were to manually translate increasing amounts of data. We consider two scenarios, 1) Monolingual samples in the source and target languages are available and 2) An additional small amount of parallel corpus is also available. We propose methods for predicting learning curves in both these scenarios.",
"pdf_parse": {
"paper_id": "P12-1003",
"_pdf_hash": "",
"abstract": [
{
"text": "Parallel data in the domain of interest is the key resource when training a statistical machine translation (SMT) system for a specific purpose. Since ad-hoc manual translation can represent a significant investment in time and money, a prior assesment of the amount of training data required to achieve a satisfactory accuracy level can be very useful. In this work, we show how to predict what the learning curve would look like if we were to manually translate increasing amounts of data. We consider two scenarios, 1) Monolingual samples in the source and target languages are available and 2) An additional small amount of parallel corpus is also available. We propose methods for predicting learning curves in both these scenarios.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Abstract",
"sec_num": null
}
],
"body_text": [
{
"text": "Parallel data in the domain of interest is the key resource when training a statistical machine translation (SMT) system for a specific business purpose. In many cases it is possible to allocate some budget for manually translating a limited sample of relevant documents, be it via professional translation services or through increasingly fashionable crowdsourcing. However, it is often difficult to predict how much training data will be required to achieve satisfactory translation accuracy, preventing sound provisional budgetting. This prediction, or more generally the prediction of the learning curve of an SMT system as a function of available in-domain parallel data, is the objective of this paper.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Introduction",
"sec_num": "1"
},
{
"text": "We consider two scenarios, representative of realistic situations.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Introduction",
"sec_num": "1"
},
{
"text": "1. In the first scenario (S1), the SMT developer is given only monolingual source and target samples from the relevant domain, and a small test parallel corpus.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Introduction",
"sec_num": "1"
},
{
"text": "2. In the second scenario (S2), an additional small seed parallel corpus is given that can be used to train small in-domain models and measure (with some variance) the evaluation score at a few points on the initial portion of the learning curve.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Introduction",
"sec_num": "1"
},
{
"text": "In both cases, the task consists in predicting an evaluation score (BLEU, throughout this work) on the test corpus as a function of the size of a subset of the source sample, assuming that we could have it manually translated and use the resulting bilingual corpus for training.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Introduction",
"sec_num": "1"
},
{
"text": "In this paper we provide the following contributions:",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Introduction",
"sec_num": "1"
},
{
"text": "1. An extensive study across six parametric function families, empirically establishing that a certain three-parameter power-law family is well suited for modeling learning curves for the Moses SMT system when the evaluation score is BLEU. Our methodology can be easily generalized to other systems and evaluation scores (Section 3); 2. A method for inferring learning curves based on features computed from the resources available in scenario S1, suitable for both the scenarios described above (S1) and (S2) (Section 4); 3. A method for extrapolating the learning curve from a few measurements, suitable for scenario S2 (Section 5); 4. A method for combining the two approaches above, achieving on S2 better prediction accuracy than either of the two in isolation (Section 6).",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Introduction",
"sec_num": "1"
},
{
"text": "In this study we limit tuning to the mixing parameters of the Moses log-linear model through MERT, keeping all meta-parameters (e.g. maximum phrase length, maximum allowed distortion, etc.) at their default values. One can expect further tweaking to lead to performance improvements, but this was a necessary simplification in order to execute the tests on a sufficiently large scale.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Introduction",
"sec_num": "1"
},
{
"text": "Our experiments involve 30 distinct language pair and domain combinations and 96 different learning curves. They show that without any parallel data we can predict the expected translation accuracy at 75K segments within an error of 6 BLEU points (Table 4), while using a seed training corpus of 10K segments narrows this error to within 1.5 points (Table 6).",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Introduction",
"sec_num": "1"
},
{
"text": "Learning curves are routinely used to illustrate how the performance of experimental methods depend on the amount of training data used. In the SMT area, Koehn et al. (2003) used learning curves to compare performance for various meta-parameter settings such as maximum phrase length, while Turchi et al. (2008) extensively studied the behaviour of learning curves under a number of test conditions on Spanish-English. In Birch et al. (2008) , the authors examined corpus features that contribute most to the machine translation performance. Their results showed that the most predictive features were the morphological complexity of the languages, their linguistic relatedness and their word-order divergence; in our work, we make use of these features, among others, for predicting translation accuracy (Section 4).",
"cite_spans": [
{
"start": 154,
"end": 173,
"text": "Koehn et al. (2003)",
"ref_id": "BIBREF7"
},
{
"start": 291,
"end": 311,
"text": "Turchi et al. (2008)",
"ref_id": "BIBREF18"
},
{
"start": 422,
"end": 441,
"text": "Birch et al. (2008)",
"ref_id": "BIBREF0"
}
],
"ref_spans": [],
"eq_spans": [],
"section": "Related Work",
"sec_num": "2"
},
{
"text": "In a Machine Learning context, Perlich et al. (2003) used learning curves for predicting maximum performance bounds of learning algorithms and to compare them. In Gu et al. (2001) , the learning curves of two classification algorithms were modelled for eight different large data sets. This work uses similar a priori knowledge for restricting the form of learning curves as ours (see Section 3), and also similar empirical evaluation criteria for comparing curve families with one another. While both application and performance metric in our work are different, we arrive at a similar conclusion that a power law family of the form y = c \u2212 a x \u2212\u03b1 is a good model of the learning curves.",
"cite_spans": [
{
"start": 31,
"end": 52,
"text": "Perlich et al. (2003)",
"ref_id": "BIBREF14"
},
{
"start": 163,
"end": 179,
"text": "Gu et al. (2001)",
"ref_id": "BIBREF6"
}
],
"ref_spans": [],
"eq_spans": [],
"section": "Related Work",
"sec_num": "2"
},
{
"text": "Learning curves are also frequently used for determining empirically the number of iterations for an incremental learning procedure.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Related Work",
"sec_num": "2"
},
{
"text": "The crucial difference in our work is that in the previous cases, learning curves are plotted a posteriori i.e. once the labelled data has become available and the training has been performed, whereas in our work the learning curve itself is the object of the prediction. Our goal is to learn to predict what the learning curve will be a priori without having to label the data at all (S1), or through labelling only a very small amount of it (S2).",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Related Work",
"sec_num": "2"
},
{
"text": "In this respect, the academic field of Computational Learning Theory has a similar goal, since it strives to identify bounds to performance measures 1 , typically including a dependency on the training sample size. We take a purely empirical approach in this work, and obtain useful estimations for a case like SMT, where the complexity of the mapping between the input and the output prevents tight theoretical analysis.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Related Work",
"sec_num": "2"
},
{
"text": "The first step in our approach consists in selecting a suitable family of shapes for the learning curves that we want to produce in the two scenarios being considered.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Selecting a parametric family of curves",
"sec_num": "3"
},
{
"text": "We formulate the problem as follows. For a certain bilingual test dataset d, we consider a set of observations",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Selecting a parametric family of curves",
"sec_num": "3"
},
{
"text": "O d = {(x 1 , y 1 ), (x 2 , y 2 )...(x n , y n )},",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Selecting a parametric family of curves",
"sec_num": "3"
},
{
"text": "where y i is the performance on d (measured using BLEU (Papineni et al., 2002) ) of a translation model trained on a parallel corpus of size x i . The corpus size x i is measured in terms of the number of segments (sentences) present in the parallel corpus.",
"cite_spans": [
{
"start": 55,
"end": 78,
"text": "(Papineni et al., 2002)",
"ref_id": "BIBREF12"
}
],
"ref_spans": [],
"eq_spans": [],
"section": "Selecting a parametric family of curves",
"sec_num": "3"
},
{
"text": "We consider such observations to be generated by a regression model of the form:",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Selecting a parametric family of curves",
"sec_num": "3"
},
{
"text": "y i = F (x i ; \u03b8) + i 1 \u2264 i \u2264 n (1)",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Selecting a parametric family of curves",
"sec_num": "3"
},
{
"text": "where F is a function depending on a vector parameter \u03b8 which depends on d, and i is Gaussian noise of constant variance. Based on our prior knowledge of the problem, we limit the search for a suitable F to families that satisfies the following conditions-monotonically increasing, concave and bounded. The first condition just says that more training data is better. The second condition expresses a notion of \"diminishing returns\", namely that a given amount of additional training data is more advantageous when added to a small rather than to a big amount of initial data. The last condition is related to our use of BLEUwhich is bounded by 1 -as a performance measure; It should be noted that some growth patterns which are sometimes proposed, such as a logarithmic regime of the form y a + b log x, are not compatible with this constraint. We consider six possible families of functions satisfying these conditions, which are listed in Table 1 . Preliminary experiments indicated that curves from the \"Power\" and \"Exp\" family with only two parameters underfitted, while those with five or more parameters led to overfitting and solution instability. We decided to only select families with three or four parameters.",
"cite_spans": [],
"ref_spans": [
{
"start": 942,
"end": 949,
"text": "Table 1",
"ref_id": "TABREF0"
}
],
"eq_spans": [],
"section": "Selecting a parametric family of curves",
"sec_num": "3"
},
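As a quick check (ours, not spelled out in the paper), the Pow3 family satisfies all three conditions whenever a > 0 and \u03b1 > 0:

```latex
% Pow3: y = c - a x^{-\alpha}, with a > 0 and \alpha > 0
\frac{dy}{dx} = a \alpha \, x^{-\alpha - 1} > 0
  \quad\text{(monotonically increasing)}
\frac{d^2 y}{dx^2} = -a \alpha (\alpha + 1) \, x^{-\alpha - 2} < 0
  \quad\text{(concave)}
\lim_{x \to \infty} y = c
  \quad\text{(bounded above by } c \le 1\text{)}
```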
{
"text": "Model Formula Exp 3 y = c \u2212 e \u2212ax+b Exp 4 y = c \u2212 e \u2212ax \u03b1 +b ExpP 3 y = c \u2212 e (x\u2212b) \u03b1 Pow 3 y = c \u2212 ax \u2212\u03b1 Pow 4 y = c \u2212 (\u2212ax + b) \u2212\u03b1 ILog 2 y = c \u2212 (a/ log x)",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Selecting a parametric family of curves",
"sec_num": "3"
},
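Written out explicitly, the six families of Table 1 are one-liners. A minimal NumPy sketch (the function names are ours):

```python
import numpy as np

# The six candidate families from Table 1; x is the training-set size
# in segments and y the predicted evaluation score.
def exp3(x, c, a, b):         # Exp3:  y = c - e^(-ax + b)
    return c - np.exp(-a * x + b)

def exp4(x, c, a, b, alpha):  # Exp4:  y = c - e^(-a x^alpha + b)
    return c - np.exp(-a * np.power(x, alpha) + b)

def expP3(x, c, b, alpha):    # ExpP3: y = c - e^((x - b)^alpha)
    return c - np.exp(np.power(x - b, alpha))

def pow3(x, c, a, alpha):     # Pow3:  y = c - a x^(-alpha)
    return c - a * np.power(x, -alpha)

def pow4(x, c, a, b, alpha):  # Pow4:  y = c - (-ax + b)^(-alpha)
    return c - np.power(-a * x + b, -alpha)

def ilog2(x, c, a):           # ILog2: y = c - a / log(x)
    return c - a / np.log(x)

# Sanity check with made-up parameters.
print(pow3(np.array([1e3, 1e4, 1e5]), 0.45, 1.1, 0.25))
```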
{
"text": "Curve fitting technique Given a set of observations Table 1 , we compute a best fit\u03b8 where:\u03b8",
"cite_spans": [],
"ref_spans": [
{
"start": 52,
"end": 59,
"text": "Table 1",
"ref_id": "TABREF0"
}
],
"eq_spans": [],
"section": "Selecting a parametric family of curves",
"sec_num": "3"
},
{
"text": "{(x 1 , y 1 ), (x 2 , y 2 )...(x n , y n )} and a curve fam- ily F (x; \u03b8) from",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Selecting a parametric family of curves",
"sec_num": "3"
},
{
"text": "EQUATION",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [
{
"start": 0,
"end": 8,
"text": "EQUATION",
"ref_id": "EQREF",
"raw_str": "= arg min \u03b8 n i=1 [y i \u2212 F (x i ; \u03b8)] 2 ,",
"eq_num": "(2)"
}
],
"section": "Selecting a parametric family of curves",
"sec_num": "3"
},
{
"text": "through use of the Levenberg-Marquardt method (Mor\u00e9, 1978) for non-linear regression. For selecting a learning curve family, and for all other experiments in this paper, we trained a large number of systems on multiple configurations of training sets and sample sizes, and tested each on multiple test sets; these are listed in Table 2 . All experiments use Moses (Koehn et al., 2007 (Neubig, 2011) Jp, En En, Jp 2 EMEA (Tiedemann, 2009) Da, De En 4 News (Callison-Burch et al., 2011) Cz,En,Fr,De,Es Cz,En,Fr,De,Es 3 The goodness of fit for each of the families is eval- 2 The settings used in training the systems are those described in http://www.statmt.org/wmt11/ baseline.html uated based on their ability to i) fit over the entire set of observations, ii) extrapolate to points beyond the observed portion of the curve and iii) generalize well over different datasets .",
"cite_spans": [
{
"start": 46,
"end": 58,
"text": "(Mor\u00e9, 1978)",
"ref_id": "BIBREF10"
},
{
"start": 364,
"end": 383,
"text": "(Koehn et al., 2007",
"ref_id": "BIBREF8"
},
{
"start": 384,
"end": 398,
"text": "(Neubig, 2011)",
"ref_id": "BIBREF11"
},
{
"start": 420,
"end": 437,
"text": "(Tiedemann, 2009)",
"ref_id": "BIBREF17"
},
{
"start": 455,
"end": 484,
"text": "(Callison-Burch et al., 2011)",
"ref_id": "BIBREF1"
},
{
"start": 571,
"end": 572,
"text": "2",
"ref_id": null
}
],
"ref_spans": [
{
"start": 328,
"end": 335,
"text": "Table 2",
"ref_id": "TABREF2"
}
],
"eq_spans": [],
"section": "Selecting a parametric family of curves",
"sec_num": "3"
},
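For concreteness, here is a fit of the Pow3 family with SciPy, whose curve_fit defaults to Levenberg-Marquardt for unconstrained problems. A sketch on synthetic observations generated from a known curve plus noise (all numbers are invented):

```python
import numpy as np
from scipy.optimize import curve_fit

def pow3(x, c, a, alpha):
    # Pow3 family from Table 1: y = c - a * x**(-alpha)
    return c - a * np.power(x, -alpha)

# Synthetic observations: (training size in segments, BLEU).
sizes = np.array([1e3, 2e3, 5e3, 1e4, 2e4, 5e4, 1e5, 2e5])
bleu = pow3(sizes, 0.45, 1.1, 0.25) \
       + np.random.default_rng(1).normal(0, 0.003, sizes.size)

# Least-squares fit of Eq. 2; with no parameter bounds, curve_fit
# uses the Levenberg-Marquardt algorithm internally.
theta_hat, _ = curve_fit(pow3, sizes, bleu, p0=[0.4, 1.0, 0.2], maxfev=10000)
c, a, alpha = theta_hat
print(f"best fit: y = {c:.3f} - {a:.3f} * x^(-{alpha:.3f})")
```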
{
"text": "We use a recursive fitting procedure where the curve obtained from fitting the first i points is used to predict the observations at two points: x i+1 , i.e. the point to the immediate right of the currently observed x i and x n , i.e. the largest point that has been observed.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Selecting a parametric family of curves",
"sec_num": "3"
},
{
"text": "The following error measures quantify the goodness of fit of the curve families:",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Selecting a parametric family of curves",
"sec_num": "3"
},
{
"text": "1. Average root mean-squared error (RMSE):",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Selecting a parametric family of curves",
"sec_num": "3"
},
{
"text": "1 N c\u2208S t\u2208Tc 1 n n i=1 [y i \u2212 F (x i ;\u03b8)] 2 1/2 ct",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Selecting a parametric family of curves",
"sec_num": "3"
},
{
"text": "where S is the set of training datasets, T c is the set of test datasets for training configuration c, \u03b8 is as defined in Eq. 2, N is the total number of combinations of training configurations and test datasets, and i ranges on a grid of training subset sizes.The expressions n, x i , y i ,\u03b8 are all local to the combination ct.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Selecting a parametric family of curves",
"sec_num": "3"
},
{
"text": "2. Average root mean squared residual at next point X = x i+1 (NPR):",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Selecting a parametric family of curves",
"sec_num": "3"
},
{
"text": "1 N c\u2208S t\u2208Tc 1 n \u2212 k \u2212 1 n\u22121 i=k [y i+1 \u2212 F (x i+1 ;\u03b8 i )] 2 1/2 ct",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Selecting a parametric family of curves",
"sec_num": "3"
},
{
"text": "where\u03b8 i is obtained using only observations up to x i in Eq. 2 and where k is the number of parameters of the family. 3 3. Average root mean squared residual at the last point X = x n (LPR):",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Selecting a parametric family of curves",
"sec_num": "3"
},
{
"text": "1 N c\u2208S t\u2208Tc 1 n \u2212 k \u2212 1 n\u22121 i=k [y n \u2212 F (x n ;\u03b8 i )] 2 1/2 ct",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Selecting a parametric family of curves",
"sec_num": "3"
},
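The two residual measures can be computed per combination ct with a short recursive-fitting loop. A sketch for the Pow3 family on synthetic data (we average with a plain mean rather than the paper's 1/(n\u2212k\u22121) factor, and all numbers are invented):

```python
import numpy as np
from scipy.optimize import curve_fit

def pow3(x, c, a, alpha):
    return c - a * np.power(x, -alpha)

def npr_lpr(sizes, bleu, k=3):
    """Fit the first i points (i = k .. n-1), then record the squared
    residual at the next point x_{i+1} (NPR) and at the largest observed
    point x_n (LPR); return the two root mean squares."""
    npr_sq, lpr_sq = [], []
    for i in range(k, len(sizes)):
        theta_i, _ = curve_fit(pow3, sizes[:i], bleu[:i],
                               p0=[0.4, 1.0, 0.2], maxfev=10000)
        npr_sq.append((bleu[i] - pow3(sizes[i], *theta_i)) ** 2)
        lpr_sq.append((bleu[-1] - pow3(sizes[-1], *theta_i)) ** 2)
    return np.sqrt(np.mean(npr_sq)), np.sqrt(np.mean(lpr_sq))

sizes = np.array([1e3, 2e3, 5e3, 1e4, 2e4, 5e4, 1e5, 2e5])
bleu = pow3(sizes, 0.45, 1.1, 0.25) \
       + np.random.default_rng(2).normal(0, 0.003, sizes.size)
print(npr_lpr(sizes, bleu))
```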
{
"text": "Curve fitting evaluation The evaluation of the goodness of fit for the curve families is presented in Table 3 . The average values of the root meansquared error and the average residuals across all the learning curves used in our experiments are shown in this table. The values are on the same scale as the BLEU scores. Figure 1 shows the curve fits obtained Loooking at the values in Table 3 , we decided to use the Pow 3 family as the best overall compromise. While it is not systematically better than Exp 4 and Pow 4 , it is good overall and has the advantage of requiring only 3 parameters.",
"cite_spans": [],
"ref_spans": [
{
"start": 102,
"end": 109,
"text": "Table 3",
"ref_id": "TABREF4"
},
{
"start": 320,
"end": 328,
"text": "Figure 1",
"ref_id": null
},
{
"start": 385,
"end": 392,
"text": "Table 3",
"ref_id": "TABREF4"
}
],
"eq_spans": [],
"section": "Selecting a parametric family of curves",
"sec_num": "3"
},
{
"text": "In this section we address scenario S1: we have access to a source-language monolingual collection (from which portions to be manually translated could be sampled) and a target-language in-domain monolingual corpus, to supplement the target side of a parallel corpus while training a language model. The only available parallel resource is a very small test corpus. Our objective is to predict the evolution of the BLEU score on the given test set as a function of the size of a random subset of the training data that we manually translate 4 . The intuition behind this is that the source-side and target-side monolingual data already convey significant information about the difficulty of the translation task. We proceed in the following way. We first train models to predict the BLEU score at m anchor sizes s 1 , . . . , s m , based on a set of features globally characterizing the configuration of interest. We restrict our attention to linear models:",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Inferring a learning curve from mostly monolingual data",
"sec_num": "4"
},
{
"text": "\u00b5 j = w j \u03c6, j \u2208 {1 . . . m}",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Inferring a learning curve from mostly monolingual data",
"sec_num": "4"
},
{
"text": "where w j is a vector of feature weights specific to predicting at anchor size j, and \u03c6 is a vector of sizeindependent configuration features, detailed below. We then perform inference using these models to predict the BLEU score at each anchor, for the test case of interest. We finally estimate the parameters of the learning curve by weighted least squares regression using the anchor predictions.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Inferring a learning curve from mostly monolingual data",
"sec_num": "4"
},
{
"text": "Anchor sizes can be chosen rather arbitrarily, but must satisfy the following two constraints:",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Inferring a learning curve from mostly monolingual data",
"sec_num": "4"
},
{
"text": "1. They must be three or more in number in order to allow fitting the tri-parameter curve.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Inferring a learning curve from mostly monolingual data",
"sec_num": "4"
},
{
"text": "2. They should be spread as much as possible along the range of sample size.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Inferring a learning curve from mostly monolingual data",
"sec_num": "4"
},
{
"text": "For our experiments, we take m = 3, with anchors at 10K, 75K and 500K segments. The feature vector \u03c6 consists of the following features:",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Inferring a learning curve from mostly monolingual data",
"sec_num": "4"
},
{
"text": "1. General properties: number and average length of sentences in the (source) test set.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Inferring a learning curve from mostly monolingual data",
"sec_num": "4"
},
{
"text": "2. Average length of tokens in the (source) test set and in the monolingual source language corpus.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Inferring a learning curve from mostly monolingual data",
"sec_num": "4"
},
{
"text": "(a) type-token ratios for n-grams of order 1 to 5 in the monolingual corpus of both source and target languages (b) perplexity of language models of order 2 to 5 derived from the monolingual source corpus computed on the source side of the test corpus.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Lexical diversity features:",
"sec_num": "3."
},
{
"text": "4. Features capturing divergence between languages in the pair:",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Lexical diversity features:",
"sec_num": "3."
},
{
"text": "(a) average ratio of source/target sentence lengths in the test set. (b) ratio of type-token ratios of orders 1 to 5 in the monolingual corpus of both source and target languages.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Lexical diversity features:",
"sec_num": "3."
},
{
"text": "5. Word-order divergence: The divergence in the word-order between the source and the target languages can be captured using the part-ofspeech (pos) tag sequences across languages. We use cross-entropy measure to capture similarity between the n-gram distributions of the pos tags in the monolingual corpora of the two languages. The order of the n-grams ranges between n = 2, 4 . . . 12 in order to account for long distance reordering between languages. The pos tags for the languages are mapped to a reduced set of twelve pos tags (Petrov et al., 2012) in order to account for differences in tagsets used across languages.",
"cite_spans": [
{
"start": 534,
"end": 555,
"text": "(Petrov et al., 2012)",
"ref_id": "BIBREF15"
}
],
"ref_spans": [],
"eq_spans": [],
"section": "Lexical diversity features:",
"sec_num": "3."
},
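Several of these features reduce to a few lines of code. Below is a sketch of the type-token-ratio features (3a) and the source/target ratio feature (4b) on toy corpora; the helper name and the example sentences are ours:

```python
def ngram_type_token_ratios(tokens, max_n=5):
    """Feature 3(a): type-token ratio for n-grams of order 1..max_n
    over a monolingual corpus given as a flat token list."""
    ratios = {}
    for n in range(1, max_n + 1):
        ngrams = [tuple(tokens[i:i + n]) for i in range(len(tokens) - n + 1)]
        ratios[n] = len(set(ngrams)) / max(len(ngrams), 1)
    return ratios

# Toy monolingual samples; in practice these are the in-domain collections.
src_ttr = ngram_type_token_ratios("the cat sat on the mat .".split())
tgt_ttr = ngram_type_token_ratios("le chat est assis sur le tapis .".split())

# Feature 4(b): ratio of source to target type-token ratios per order.
ttr_ratio = {n: src_ttr[n] / tgt_ttr[n] for n in src_ttr}
print(ttr_ratio)
```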
{
"text": "These features capture our intuition that translation is going to be harder if the language in the domain is highly variable and if the source and target languages diverge more in terms of morphology and word-order. The weights w j are estimated from data. The training data for fitting these linear models is obtained in the following way. For each configuration (combination of language pair and domain) c and test set t in Table 2 , a gold curve is fitted using the selected tri-parameter power-law family using a fine grid of corpus sizes. This is available as a byproduct of the experiments for comparing different parametric families described in Section 3. We then compute the value of the gold curves at the m anchor sizes: we thus have m \"gold\" vectors \u00b5 1 , . . . , \u00b5 m with accurate estimates of BLEU at the anchor sizes 5 . We construct the design matrix \u03a6 with one column for each feature vector \u03c6 ct corresponding to each combination of training configuration c and test set t.",
"cite_spans": [],
"ref_spans": [
{
"start": 426,
"end": 433,
"text": "Table 2",
"ref_id": "TABREF2"
}
],
"eq_spans": [],
"section": "Lexical diversity features:",
"sec_num": "3."
},
{
"text": "We then estimate weights w j using Ridge regression (L 2 regularization):",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Lexical diversity features:",
"sec_num": "3."
},
{
"text": "EQUATION",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [
{
"start": 0,
"end": 8,
"text": "EQUATION",
"ref_id": "EQREF",
"raw_str": "w j = arg min w ||\u03a6 w \u2212 \u00b5 j || 2 + C||w|| 2",
"eq_num": "(3)"
}
],
"section": "Lexical diversity features:",
"sec_num": "3."
},
{
"text": "where the regularization parameter C is chosen by cross-validation. We also run experiments using Lasso (L 1 ) regularization (Tibshirani, 1994) instead of Ridge. As baseline, we take a constant mean model predicting, for each anchor size s j , the average of all the \u00b5 jct . We do not assume the difficulty of predicting BLEU at all anchor points to be the same. To allow for this, we use (non-regularized) weighted leastsquares to fit a curve from our parametric family through the m anchor points 6 . Following (Croarkin and Tobias, 2006, Section 4.4.5 .2), the anchor confidence is set to be the inverse of the cross-validated mean square residuals:",
"cite_spans": [
{
"start": 126,
"end": 144,
"text": "(Tibshirani, 1994)",
"ref_id": "BIBREF16"
},
{
"start": 514,
"end": 555,
"text": "(Croarkin and Tobias, 2006, Section 4.4.5",
"ref_id": null
}
],
"ref_spans": [],
"eq_spans": [],
"section": "Lexical diversity features:",
"sec_num": "3."
},
{
"text": "EQUATION",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [
{
"start": 0,
"end": 8,
"text": "EQUATION",
"ref_id": "EQREF",
"raw_str": "\u03c9 j = 1 N c\u2208S t\u2208Tc (\u03c6 ct w \\c j \u2212 \u00b5 jct ) 2 \u22121",
"eq_num": "(4)"
}
],
"section": "Lexical diversity features:",
"sec_num": "3."
},
{
"text": "where w \\c j are the feature weights obtained by the regression above on all training configurations except c, \u00b5 jct is the gold value at anchor j for training/test combination c, t, and N is the total number of such combinations 7 . In other words, we assign to each anchor point a confidence inverse to the crossvalidated mean squared error of the model used to predict it.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Lexical diversity features:",
"sec_num": "3."
},
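Eqs. 3 and 4 amount to one regularized linear model per anchor plus a leave-one-configuration-out error estimate. A sketch with scikit-learn on synthetic data (array shapes, feature count, configuration labels and the fixed regularization strength are all hypothetical; the paper chooses C by cross-validation):

```python
import numpy as np
from sklearn.linear_model import Ridge

rng = np.random.default_rng(0)
n_combos, n_feats = 30, 12
Phi = rng.random((n_combos, n_feats))     # one row per (config, test) pair
mu = rng.random((n_combos, 3)) * 0.4      # gold BLEU at anchors 10K/75K/500K
configs = [f"cfg{i % 10}" for i in range(n_combos)]  # configuration ids

def anchor_confidence(mu_j, alpha=1.0):
    """Eq. 4: omega_j is the inverse of the leave-one-configuration-out
    mean squared error of the anchor-j regression model."""
    sq_errs = []
    for c in set(configs):
        held = np.array([g == c for g in configs])
        model = Ridge(alpha=alpha).fit(Phi[~held], mu_j[~held])
        sq_errs.extend((model.predict(Phi[held]) - mu_j[held]) ** 2)
    return 1.0 / np.mean(sq_errs)

# Eq. 3: one Ridge model per anchor size (Lasso would be the L1 variant).
anchor_models = [Ridge(alpha=1.0).fit(Phi, mu[:, j]) for j in range(3)]
omega = np.array([anchor_confidence(mu[:, j]) for j in range(3)])
# Anchor predictions phi_u . w_j for one configuration's feature row.
mu_pred = np.array([m.predict(Phi[:1])[0] for m in anchor_models])
```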
{
"text": "For a new unseen configuration with feature vector \u03c6 u , we determine the parameters \u03b8 u of the corresponding learning curve as:",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Lexical diversity features:",
"sec_num": "3."
},
{
"text": "\u03b8 u = arg min \u03b8 j \u03c9 j F (s j ; \u03b8) \u2212 \u03c6 u w j 2 (5)",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Lexical diversity features:",
"sec_num": "3."
},
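A sketch of Eq. 5 under the Pow3 family, using a generic simplex optimizer in place of a dedicated weighted least-squares routine (the anchor predictions and confidences below are made-up numbers):

```python
import numpy as np
from scipy.optimize import minimize

def pow3(x, c, a, alpha):
    # Pow3 family: y = c - a * x**(-alpha)
    return c - a * np.power(x, -alpha)

anchors = np.array([1e4, 7.5e4, 5e5])    # s_j: 10K, 75K, 500K segments
mu_pred = np.array([0.22, 0.28, 0.33])   # phi_u . w_j, hypothetical
omega = np.array([40.0, 25.0, 10.0])     # anchor confidences from Eq. 4

def weighted_sq_loss(theta):
    # Eq. 5: confidence-weighted squared error at the anchor points.
    return np.sum(omega * (pow3(anchors, *theta) - mu_pred) ** 2)

theta_u = minimize(weighted_sq_loss, x0=[0.4, 1.0, 0.2],
                   method="Nelder-Mead").x
print("inferred curve parameters:", theta_u)
```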
{
"text": "5 Extrapolating a learning curve fitted on a small parallel corpus Given a small \"seed\" parallel corpus, the translation system can be used to train small in-domain models and the evaluation score can be measured at a few initial sample sizes",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Lexical diversity features:",
"sec_num": "3."
},
{
"text": "{(x 1 , y 1 ), (x 2 , y 2 )...(x p , y p )}.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Lexical diversity features:",
"sec_num": "3."
},
{
"text": "The performance of the system for these initial points provides evidence for predicting its performance for larger sample sizes. In order to do so, a learning curve from the family Pow 3 is first fit through these initial points. We assume that p \u2265 3 for this operation to be welldefined. The best fit\u03b7 is computed using the same curve fitting as in Eq. 2.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Lexical diversity features:",
"sec_num": "3."
},
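A sketch of the extrapolation step: fit Pow3 through the seed-corpus measurements and read BLEU off the fitted curve at larger sizes (the initial scores below are invented):

```python
import numpy as np
from scipy.optimize import curve_fit

def pow3(x, c, a, alpha):
    return c - a * np.power(x, -alpha)

# Hypothetical scores measured on models trained from the seed corpus.
initial_x = np.array([1e3, 5e3, 1e4, 2e4])
initial_y = np.array([0.254, 0.319, 0.340, 0.358])

# Same least-squares fit as in Eq. 2, restricted to the initial points.
eta_hat, _ = curve_fit(pow3, initial_x, initial_y,
                       p0=[0.4, 1.0, 0.2], maxfev=10000)
for s in (1e4, 7.5e4, 5e5):   # the anchor sizes
    print(f"predicted BLEU at {int(s):>6} segments: {pow3(s, *eta_hat):.3f}")
```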
{
"text": "At each individual anchor size s j , the accuracy of prediction is measured using the root mean-squared error between the prediction of extrapolated curves and the gold values:",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Lexical diversity features:",
"sec_num": "3."
},
{
"text": "1 N c\u2208S t\u2208Tc [F (s j ;\u03b7 ct ) \u2212 \u00b5 ctj ] 2 1/2 (6)",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Lexical diversity features:",
"sec_num": "3."
},
{
"text": "where\u03b7 ct are the parameters of the curve fit using the initial points for the combination ct.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Lexical diversity features:",
"sec_num": "3."
},
{
"text": "In general, we observed that the extrapolated curve tends to over-estimate BLEU for large samples.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Lexical diversity features:",
"sec_num": "3."
},
{
"text": "In scenario S2, the models trained from the seed parallel corpus and the features used for inference (Section 4) provide complementary information. In this section we combine the two to see if this yields more accurate learning curves.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Combining inference and extrapolation",
"sec_num": "6"
},
{
"text": "For the inference method of Section 4, predictions of models at anchor points are weighted by the inverse of the model empirical squared error (\u03c9 j ). We extend this approach to the extrapolated curves. Let u be a new configuration with seed parallel corpus of size x u , and let x l be the largest point in our grid for which x l \u2264 x u . We first train translation models and evaluate scores on samples of size x 1 , . . . , x l , fit parameters\u03b7 u through the scores, and then extrapolate BLEU at the anchors s j : F (s j ;\u03b7 u ), j \u2208 {1, . . . , m}. Using the models trained for the experiments in Section 3, we estimate the squared extrapolation error at the anchors s j when using models trained on size up to x l , and set the confidence in the extrapolations 8 for u to its inverse:",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Combining inference and extrapolation",
"sec_num": "6"
},
{
"text": "EQUATION",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [
{
"start": 0,
"end": 8,
"text": "EQUATION",
"ref_id": "EQREF",
"raw_str": "\u03be <l j = 1 N c\u2208S t\u2208Tc (F (s j ; \u03b7 <l ct ) \u2212 \u00b5 ctj ) 2 \u22121",
"eq_num": "(7)"
}
],
"section": "Combining inference and extrapolation",
"sec_num": "6"
},
{
"text": "where N , S, T c and \u00b5 ctj have the same meaning as in Eq. 4, and \u03b7 <l ct are parameters fitted for configuration c and test t using only scores measured at x 1 , . . . , x l . We finally estimate the parameters \u03b8 u of 8 In some cases these can actually be interpolations. the combined curve as:",
"cite_spans": [
{
"start": 219,
"end": 220,
"text": "8",
"ref_id": null
}
],
"ref_spans": [],
"eq_spans": [],
"section": "Combining inference and extrapolation",
"sec_num": "6"
},
{
"text": "\u03b8 u = arg min \u03b8 j \u03c9 j (F (s j ; \u03b8) \u2212 \u03c6 u w j ) 2 + \u03be <l j (F (s j ; \u03b8) \u2212 F (s j ;\u03b7 u )) 2",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Combining inference and extrapolation",
"sec_num": "6"
},
{
"text": "where \u03c6 u is the feature vector for u, and w j are the weights we obtained from the regression in Eq. 3.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Combining inference and extrapolation",
"sec_num": "6"
},
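Putting the two confidence-weighted terms together, the combined objective can be minimized numerically. A sketch with made-up confidences and parameters (again using a simplex optimizer rather than a dedicated weighted least-squares solver):

```python
import numpy as np
from scipy.optimize import minimize

def pow3(x, c, a, alpha):
    return c - a * np.power(x, -alpha)

anchors = np.array([1e4, 7.5e4, 5e5])       # s_j
mu_inferred = np.array([0.22, 0.28, 0.33])  # phi_u . w_j (Section 4)
omega = np.array([40.0, 25.0, 10.0])        # inference confidences, Eq. 4
eta_u = (0.42, 1.1, 0.22)                   # curve fitted on the seed scores
xi = np.array([200.0, 60.0, 15.0])          # extrapolation confidences, Eq. 7

def combined_loss(theta):
    # Confidence-weighted pull toward both the inferred anchor values
    # and the extrapolated curve's anchor values.
    pred = pow3(anchors, *theta)
    return np.sum(omega * (pred - mu_inferred) ** 2
                  + xi * (pred - pow3(anchors, *eta_u)) ** 2)

theta_u = minimize(combined_loss, x0=[0.4, 1.0, 0.2], method="Nelder-Mead").x
print("combined curve parameters:", theta_u)
```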
{
"text": "In this section, we report the results of our experiments on predicting the learning curves. In the case of inference from mostly monolingual data, the accuracy of the predictions at each of the anchor sizes is evaluated using root mean-squared error over the predictions obtained in a leave-oneout manner over the set of configurations from Table 2. Table 4 shows these results for Ridge and Lasso regression models at the three anchor sizes. As an example, the model estimated using Lasso for the 75K anchor size exhibits a root mean squared error of 6 BLEU points. The errors we obtain are lower than the error of the baseline consisting in taking, for each anchor size s j , the average of all the \u00b5 ctj . The Lasso regression model selected four features from the entire feature set: i) Size of the test set (sentences & tokens) ii) Perplexity of language model (order 5) on the test set iii) Type-token ratio of the target monolingual corpus . Feature correlation measures such as Pearsons R showed that the features corresponding to type-token ratios of both source and target languages and size of test set have a high correlation with the BLEU scores at the three anchor sizes. Figure 2 shows an instance of the inferred learning curves obtained using a weighted least squares method on the predictions at the anchor sizes. Table 7 presents the cumulative error of the inferred learning curves with respect to the gold curves, measured as the average distance between the curves in the range x \u2208 [0.1K, 100K]. ",
"cite_spans": [],
"ref_spans": [
{
"start": 351,
"end": 358,
"text": "Table 4",
"ref_id": "TABREF6"
},
{
"start": 1187,
"end": 1195,
"text": "Figure 2",
"ref_id": "FIGREF0"
}
],
"eq_spans": [],
"section": "Experiments",
"sec_num": "7"
},
{
"text": "As explained in Section 5, we evaluate the accuracy of predictions from the extrapolated curve using the root mean squared error (see Eq. 6) between the predictions of this curve and the gold values at the anchor points.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Extrapolated Learning Curves",
"sec_num": "7.2"
},
{
"text": "We conducted experiments for three sets of initial points, 1) 1K-5K-10K, 2) 5K-10K-20K, and 3) 1K-5K-10K-20K. For each of these sets, we show the prediction accuracy at the anchor sizes, 10K 9 , 75K, and 500K in Table 5 .",
"cite_spans": [],
"ref_spans": [
{
"start": 212,
"end": 219,
"text": "Table 5",
"ref_id": null
}
],
"eq_spans": [],
"section": "Extrapolated Learning Curves",
"sec_num": "7.2"
},
{
"text": "10K 75K 500K 1K-5K-10K 0.005 0.017 0.042 5K-10K-20K 0.002 0.015 0.034 1K-5K-10K-20K 0.002 0.008 0.019 Table 5 : Root mean squared error of the extrapolated curves at the three anchor sizes",
"cite_spans": [],
"ref_spans": [
{
"start": 102,
"end": 109,
"text": "Table 5",
"ref_id": null
}
],
"eq_spans": [],
"section": "Initial Points",
"sec_num": null
},
{
"text": "The root mean squared errors obtained by extrapolating the learning curve are much lower than those obtained by prediction of translation accuracy using the monolingual corpus only (see Table 4 ), which is expected given that more direct evidence is available in the former case . In Table 5 , one can also see that the root mean squared error for the sets 1K-5K-10K and 5K-10K-20K are quite close for anchor 9 The 10K point is not an extrapolation point but lies within the range of the set of initial points. However, it does give a measure of the closeness of the curve fit using only the initial points with the gold fit using all the points; the value of this gold fit at 10K is not necessarily equal to the observation at 10K. sizes 75K and 500K. However, when a configuration of four initial points is used for the same amount of \"seed\" parallel data, it outperforms both the configurations with three initial points.",
"cite_spans": [
{
"start": 409,
"end": 410,
"text": "9",
"ref_id": null
}
],
"ref_spans": [
{
"start": 186,
"end": 193,
"text": "Table 4",
"ref_id": "TABREF6"
},
{
"start": 284,
"end": 291,
"text": "Table 5",
"ref_id": null
}
],
"eq_spans": [],
"section": "Initial Points",
"sec_num": null
},
{
"text": "In Section 6, we presented a method for combining the predicted learning curves from inference and extrapolation by using a weighted least squares approach. Table 6 reports the root mean squared error at the three anchor sizes from the combined curves. We also present an overall evaluation of all the predicted learning curves. The evaluation metric is the average distance between the predicted curves and the gold curves, within the range of sample sizes x min =0.1K to x max =500K segments; this metric is defined as:",
"cite_spans": [],
"ref_spans": [
{
"start": 157,
"end": 164,
"text": "Table 6",
"ref_id": "TABREF8"
}
],
"eq_spans": [],
"section": "Combined Learning Curves and Overall Comparison",
"sec_num": "7.3"
},
{
"text": "1 N c\u2208S t\u2208Tc xmax x=x min |F (x;\u03b7 ct ) \u2212 F (x;\u03b8 ct )| x max \u2212 x min",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Combined Learning Curves and Overall Comparison",
"sec_num": "7.3"
},
{
"text": "where\u03b7 ct is the curve of interest,\u03b8 ct is the gold curve, and x is in the range [x min , x max ], with a step size of 1. Table 7 presents the final evaluation.",
"cite_spans": [],
"ref_spans": [
{
"start": 122,
"end": 129,
"text": "Table 7",
"ref_id": null
}
],
"eq_spans": [],
"section": "Combined Learning Curves and Overall Comparison",
"sec_num": "7.3"
},
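Under the Pow3 family this distance is a one-liner. A sketch (the parameter triples are invented, and the paper's normalization by x_max \u2212 x_min is approximated by a plain mean over the unit grid):

```python
import numpy as np

def pow3(x, c, a, alpha):
    return c - a * np.power(x, -alpha)

def avg_curve_distance(theta_pred, theta_gold, x_min=100, x_max=500_000):
    # Mean absolute BLEU gap between the two curves on the unit grid.
    x = np.arange(x_min, x_max + 1, dtype=float)
    return np.mean(np.abs(pow3(x, *theta_pred) - pow3(x, *theta_gold)))

print(avg_curve_distance((0.42, 1.1, 0.22), (0.45, 1.0, 0.25)))
```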
{
"text": "Initial Points IR IL EC CR CL 1K-5K-10K 0.034 0.050 0.018 0.015 0.014 5K-10K-20K 0.036 0.048 0.011 0.010 0.009 1K-5K-10K-20K 0.032 0.049 0.008 0.007 0.007 Table 7 : Average distance of different predicted learning curves relative to the gold curve. Columns: IR=\"Inference using Ridge model\", IL=\"Inference using Lasso model\", EC=\"Extrapolated curve\", CR=\"Combined curve using Ridge\", CL=\"Combined curve using Lasso\"",
"cite_spans": [],
"ref_spans": [
{
"start": 155,
"end": 162,
"text": "Table 7",
"ref_id": null
}
],
"eq_spans": [],
"section": "Combined Learning Curves and Overall Comparison",
"sec_num": "7.3"
},
{
"text": "We see that the combined curves (CR and CL) perform slightly better than the inferred curves (IR and IL) and the extrapolated curves (EC). The average distance is on the same scale as the BLEU score, which suggests that our best curves can predict the gold curve within 1.5 BLEU points on average (the best result being 0.7 BLEU points when the initial points are 1K-5K-10K-20K) which is a telling result. The distances between the predicted and the gold curves for all the learning curves in our experiments are shown in Figure 3 . Figure 3 : Distances between the predicted and the gold learning curves in our experiments across the range of sample sizes. The dotted lines indicate the distance from gold curve for each instance, while the bold line indicates the 95 th quantile of the distance between the curves. IR=\"Inference using Ridge model\", EC=\"Extrapolated curve\", CR=\"Combined curve using Ridge\".",
"cite_spans": [],
"ref_spans": [
{
"start": 522,
"end": 530,
"text": "Figure 3",
"ref_id": null
},
{
"start": 533,
"end": 541,
"text": "Figure 3",
"ref_id": null
}
],
"eq_spans": [],
"section": "Combined Learning Curves and Overall Comparison",
"sec_num": "7.3"
},
{
"text": "We also provide a comparison of the different predicted curves with respect to the gold curve as shown in ",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Combined Learning Curves and Overall Comparison",
"sec_num": "7.3"
},
{
"text": "The ability to predict the amount of parallel data required to achieve a given level of quality is very valuable in planning business deployments of statistical machine translation; yet, we are not aware of any rigorous proposal for addressing this need.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Conclusion",
"sec_num": "8"
},
{
"text": "Here, we proposed methods that can be directly applied to predicting learning curves in realistic scenarios. We identified a suitable parametric family for modeling learning curves via an extensive empirical comparison. We described an inference method that requires a minimal initial investment in the form of only a small parallel test dataset. For the cases where a slightly larger in-domain \"seed\" parallel corpus is available, we introduced an extrapolation method and a combined method yielding highprecision predictions: using models trained on up to 20K sentence pairs we can predict performance on a given test set with a root mean squared error in the order of 1 BLEU point at 75K sentence pairs, and in the order of 2-4 BLEU points at 500K. Considering that variations in the order of 1 BLEU point on a same test dataset can be observed simply due to the instability of the standard MERT parameter tuning algorithm (Foster and Kuhn, 2009; Clark et al., 2011) , we believe our results to be close to what can be achieved in principle. Note that by using gold curves as labels instead of actual measures we implicitly average across many rounds of MERT (14 for each curve), greatly attenuating the impact of the instability in the optimization procedure due to randomness.",
"cite_spans": [
{
"start": 926,
"end": 949,
"text": "(Foster and Kuhn, 2009;",
"ref_id": "BIBREF5"
},
{
"start": 950,
"end": 969,
"text": "Clark et al., 2011)",
"ref_id": "BIBREF2"
}
],
"ref_spans": [],
"eq_spans": [],
"section": "Conclusion",
"sec_num": "8"
},
{
"text": "For enabling this work we trained a multitude of instances of the same phrase-based SMT system on 30 distinct combinations of language-pair and domain, each with fourteen distinct training sets of increasing size and tested these instances on multiple in-domain datasets, generating 96 learning curves. BLEU measurements for all 96 learning curves along with the gold curves and feature values used for inferring the learning curves are available as additional material to this submission.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Conclusion",
"sec_num": "8"
},
{
"text": "We believe that it should be possible to use insights from this paper in an active learning setting, to select, from an available monolingual source, a subset of a given size for manual translation, in such a way at to yield the highest performance, and we plan to extend our work in this direction.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Conclusion",
"sec_num": "8"
},
{
"text": "More often to a loss, which is equivalent.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "",
"sec_num": null
},
{
"text": "We start the summation from i = k, because at least k points are required for computing\u03b8 i .",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "",
"sec_num": null
},
{
"text": "We specify that it is a random sample as opposed to a subset deliberately chosen to maximize learning effectiveness. While there are clear ties between our present work and active learning, we prefer to keep these two aspects distinct at this stage, and intend to explore this connection in future work.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "",
"sec_num": null
},
{
"text": "Computing these values from the gold curve rather than directly from the observations has the advantage of smoothing the observed values and also does not assume that observations at the anchor sizes are always directly available.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "",
"sec_num": null
},
{
"text": "When the number of anchor points is the same as the number of parameters in the parametric family, the curve can be fit exactly through all anchor points. However the general discussion is relevant in case there are more anchor points than parameters, and also in view of the combination of inference and extrapolation in Section 6.7 Curves on different test data for the same training configuration are highly correlated and are therefore left out.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "",
"sec_num": null
}
],
"back_matter": [],
"bib_entries": {
"BIBREF0": {
"ref_id": "b0",
"title": "Predicting Success in Machine Translation",
"authors": [
{
"first": "Alexandra",
"middle": [],
"last": "Birch",
"suffix": ""
},
{
"first": "Miles",
"middle": [],
"last": "Osborne",
"suffix": ""
},
{
"first": "Philipp",
"middle": [],
"last": "Koehn",
"suffix": ""
}
],
"year": 2008,
"venue": "Proceedings of the 2008 Conference on Empirical Methods in Natural Language Processing",
"volume": "",
"issue": "",
"pages": "745--754",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Alexandra Birch, Miles Osborne, and Philipp Koehn. 2008. Predicting Success in Machine Translation. In Proceedings of the 2008 Conference on Empirical Methods in Natural Language Processing, pages 745- 754, Honolulu, Hawaii, October. Association for Com- putational Linguistics.",
"links": null
},
"BIBREF1": {
"ref_id": "b1",
"title": "Findings of the 2011 Workshop on Statistical Machine Translation",
"authors": [
{
"first": "Chris",
"middle": [],
"last": "Callison-Burch",
"suffix": ""
},
{
"first": "Philipp",
"middle": [],
"last": "Koehn",
"suffix": ""
},
{
"first": "Christof",
"middle": [],
"last": "Monz",
"suffix": ""
},
{
"first": "Omar",
"middle": [],
"last": "Zaidan",
"suffix": ""
}
],
"year": 2011,
"venue": "Proceedings of the Sixth Workshop on Statistical Machine Translation",
"volume": "",
"issue": "",
"pages": "22--64",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Chris Callison-Burch, Philipp Koehn, Christof Monz, and Omar Zaidan. 2011. Findings of the 2011 Work- shop on Statistical Machine Translation. In Proceed- ings of the Sixth Workshop on Statistical Machine Translation, pages 22-64, Edinburgh, Scotland, July. Association for Computational Linguistics.",
"links": null
},
"BIBREF2": {
"ref_id": "b2",
"title": "Better Hypothesis Testing for Statistical Machine Translation: Controlling for Optimizer Instability",
"authors": [
{
"first": "Jonathan",
"middle": [
"H"
],
"last": "Clark",
"suffix": ""
},
{
"first": "Chris",
"middle": [],
"last": "Dyer",
"suffix": ""
},
{
"first": "Alon",
"middle": [],
"last": "Lavie",
"suffix": ""
},
{
"first": "Noah",
"middle": [
"A"
],
"last": "Smith",
"suffix": ""
}
],
"year": 2011,
"venue": "Proceedings of the 49th Annual Meeting of the Association for Computational Linguistics: Human Language Technologies",
"volume": "",
"issue": "",
"pages": "176--181",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Jonathan H. Clark, Chris Dyer, Alon Lavie, and Noah A. Smith. 2011. Better Hypothesis Testing for Statis- tical Machine Translation: Controlling for Optimizer Instability. In Proceedings of the 49th Annual Meet- ing of the Association for Computational Linguistics: Human Language Technologies, pages 176-181, Port- land, Oregon, USA, June. Association for Computa- tional Linguistics.",
"links": null
},
"BIBREF4": {
"ref_id": "b4",
"title": "NIST/SEMATECH e-Handbook of Statistical Methods. NIST/SEMATECH, July. Available online",
"authors": [],
"year": null,
"venue": "",
"volume": "",
"issue": "",
"pages": "",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "NIST/SEMATECH e-Handbook of Statistical Meth- ods. NIST/SEMATECH, July. Available online: http://www.itl.nist.gov/div898/handbook/.",
"links": null
},
"BIBREF5": {
"ref_id": "b5",
"title": "Stabilizing Minimum Error Rate Training",
"authors": [
{
"first": "George",
"middle": [],
"last": "Foster",
"suffix": ""
},
{
"first": "Roland",
"middle": [],
"last": "Kuhn",
"suffix": ""
}
],
"year": 2009,
"venue": "Proceedings of the Fourth Workshop on Statistical Machine Translation",
"volume": "",
"issue": "",
"pages": "242--249",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "George Foster and Roland Kuhn. 2009. Stabilizing Minimum Error Rate Training. In Proceedings of the Fourth Workshop on Statistical Machine Translation, pages 242-249, Athens, Greece, March. Association for Computational Linguistics.",
"links": null
},
"BIBREF6": {
"ref_id": "b6",
"title": "Modelling Classification Performance for Large Data Sets",
"authors": [
{
"first": "Baohua",
"middle": [],
"last": "Gu",
"suffix": ""
},
{
"first": "Feifang",
"middle": [],
"last": "Hu",
"suffix": ""
},
{
"first": "Huan",
"middle": [],
"last": "Liu",
"suffix": ""
}
],
"year": 2001,
"venue": "Proceedings of the Second International Conference on Advances in Web-Age Information Management, WAIM '01",
"volume": "",
"issue": "",
"pages": "317--328",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Baohua Gu, Feifang Hu, and Huan Liu. 2001. Mod- elling Classification Performance for Large Data Sets. In Proceedings of the Second International Conference on Advances in Web-Age Information Management, WAIM '01, pages 317-328, London, UK. Springer- Verlag.",
"links": null
},
"BIBREF7": {
"ref_id": "b7",
"title": "Statistical Phrase-Based Translation",
"authors": [
{
"first": "Philipp",
"middle": [],
"last": "Koehn",
"suffix": ""
},
{
"first": "Franz",
"middle": [
"J"
],
"last": "Och",
"suffix": ""
},
{
"first": "Daniel",
"middle": [],
"last": "Marcu",
"suffix": ""
}
],
"year": 2003,
"venue": "Proceedings of Human Language Technologies: The 2003 Annual Conference of the North American Chapter of the Association for Computational Linguistics",
"volume": "",
"issue": "",
"pages": "48--54",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Philipp Koehn, Franz J. Och, and Daniel Marcu. 2003. Statistical Phrase-Based Translation. In Proceedings of Human Language Technologies: The 2003 Annual Conference of the North American Chapter of the As- sociation for Computational Linguistics, pages 48-54, Edmonton, Canada, May. Association for Computa- tional Linguistics.",
"links": null
},
"BIBREF8": {
"ref_id": "b8",
"title": "Moses: Open Source Toolkit for Statistical Machine Translation",
"authors": [
{
"first": "Philipp",
"middle": [],
"last": "Koehn",
"suffix": ""
},
{
"first": "Hieu",
"middle": [],
"last": "Hoang",
"suffix": ""
},
{
"first": "Alexandra",
"middle": [],
"last": "Birch",
"suffix": ""
},
{
"first": "Chris",
"middle": [],
"last": "Callison-Burch",
"suffix": ""
},
{
"first": "Marcello",
"middle": [],
"last": "Federico",
"suffix": ""
},
{
"first": "Nicola",
"middle": [],
"last": "Bertoldi",
"suffix": ""
},
{
"first": "Brooke",
"middle": [],
"last": "Cowan",
"suffix": ""
},
{
"first": "Wade",
"middle": [],
"last": "Shen",
"suffix": ""
},
{
"first": "Christine",
"middle": [],
"last": "Moran",
"suffix": ""
},
{
"first": "Richard",
"middle": [],
"last": "Zens",
"suffix": ""
},
{
"first": "Chris",
"middle": [],
"last": "Dyer",
"suffix": ""
},
{
"first": "Ondrej",
"middle": [],
"last": "Bojar",
"suffix": ""
},
{
"first": "Alexandra",
"middle": [],
"last": "Constantin",
"suffix": ""
},
{
"first": "Evan",
"middle": [],
"last": "Herbst",
"suffix": ""
}
],
"year": 2007,
"venue": "Proceedings of the 45th Annual Meeting of the Association for Computational Linguistics Companion Volume Proceedings of the Demo and Poster Sessions",
"volume": "",
"issue": "",
"pages": "177--180",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Philipp Koehn, Hieu Hoang, Alexandra Birch, Chris Callison-Burch, Marcello Federico, Nicola Bertoldi, Brooke Cowan, Wade Shen, Christine Moran, Richard Zens, Chris Dyer, Ondrej Bojar, Alexandra Con- stantin, and Evan Herbst. 2007. Moses: Open Source Toolkit for Statistical Machine Translation. In Pro- ceedings of the 45th Annual Meeting of the Associ- ation for Computational Linguistics Companion Vol- ume Proceedings of the Demo and Poster Sessions, pages 177-180, Prague, Czech Republic, June. Asso- ciation for Computational Linguistics.",
"links": null
},
"BIBREF9": {
"ref_id": "b9",
"title": "Europarl: A Parallel Corpus for Statistical Machine Translation",
"authors": [
{
"first": "Philipp",
"middle": [],
"last": "Koehn",
"suffix": ""
}
],
"year": 2005,
"venue": "Proceedings of the 10th Machine Translation Summit",
"volume": "",
"issue": "",
"pages": "",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Philipp Koehn. 2005. Europarl: A Parallel Corpus for Statistical Machine Translation. In Proceedings of the 10th Machine Translation Summit, Phuket, Thailand, September.",
"links": null
},
"BIBREF10": {
"ref_id": "b10",
"title": "The Levenberg-Marquardt Algorithm: Implementation and Theory. Numerical Analysis",
"authors": [
{
"first": "Jorge",
"middle": [
"J"
],
"last": "Mor\u00e9",
"suffix": ""
}
],
"year": 1977,
"venue": "Proceedings Biennial Conference Dundee",
"volume": "630",
"issue": "",
"pages": "105--116",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Jorge J. Mor\u00e9. 1978. The Levenberg-Marquardt Algo- rithm: Implementation and Theory. Numerical Anal- ysis. Proceedings Biennial Conference Dundee 1977, 630:105-116.",
"links": null
},
"BIBREF11": {
"ref_id": "b11",
"title": "The Kyoto Free Translation Task",
"authors": [
{
"first": "Graham",
"middle": [],
"last": "Neubig",
"suffix": ""
}
],
"year": 2011,
"venue": "",
"volume": "",
"issue": "",
"pages": "",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Graham Neubig. 2011. The Kyoto Free Translation Task. http://www.phontron.com/kftt.",
"links": null
},
"BIBREF12": {
"ref_id": "b12",
"title": "Bleu: a Method for Automatic Evaluation of Machine Translation",
"authors": [
{
"first": "Kishore",
"middle": [],
"last": "Papineni",
"suffix": ""
},
{
"first": "Salim",
"middle": [],
"last": "Roukos",
"suffix": ""
},
{
"first": "Todd",
"middle": [],
"last": "Ward",
"suffix": ""
},
{
"first": "Wei-Jing",
"middle": [],
"last": "Zhu",
"suffix": ""
}
],
"year": 2002,
"venue": "Proceedings of 40th",
"volume": "",
"issue": "",
"pages": "",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Kishore Papineni, Salim Roukos, Todd Ward, and Wei- Jing Zhu. 2002. Bleu: a Method for Automatic Eval- uation of Machine Translation. In Proceedings of 40th",
"links": null
},
"BIBREF14": {
"ref_id": "b14",
"title": "Tree Induction vs. Logistic Regression: A Learning-Curve Analysis",
"authors": [
{
"first": "Claudia",
"middle": [],
"last": "Perlich",
"suffix": ""
},
{
"first": "Foster",
"middle": [
"J"
],
"last": "Provost",
"suffix": ""
},
{
"first": "Jeffrey",
"middle": [
"S"
],
"last": "Simonoff",
"suffix": ""
}
],
"year": 2003,
"venue": "Journal of Machine Learning Research",
"volume": "4",
"issue": "",
"pages": "211--255",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Claudia Perlich, Foster J. Provost, and Jeffrey S. Si- monoff. 2003. Tree Induction vs. Logistic Regres- sion: A Learning-Curve Analysis. Journal of Machine Learning Research, 4:211-255.",
"links": null
},
"BIBREF15": {
"ref_id": "b15",
"title": "A Universal Part-of-Speech Tagset",
"authors": [
{
"first": "Slav",
"middle": [],
"last": "Petrov",
"suffix": ""
},
{
"first": "Dipanjan",
"middle": [],
"last": "Das",
"suffix": ""
},
{
"first": "Ryan",
"middle": [],
"last": "Mcdonald",
"suffix": ""
}
],
"year": 2012,
"venue": "Proceedings of the Eighth conference on International Language Resources and Evaluation (LREC'12)",
"volume": "",
"issue": "",
"pages": "",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Slav Petrov, Dipanjan Das, and Ryan McDonald. 2012. A Universal Part-of-Speech Tagset. In Proceedings of the Eighth conference on International Language Resources and Evaluation (LREC'12), Istanbul, May. European Language Resources Association (ELRA).",
"links": null
},
"BIBREF16": {
"ref_id": "b16",
"title": "Regression Shrinkage and Selection Via the Lasso",
"authors": [
{
"first": "Robert",
"middle": [],
"last": "Tibshirani",
"suffix": ""
}
],
"year": 1994,
"venue": "Journal of the Royal Statistical Society, Series B",
"volume": "58",
"issue": "",
"pages": "267--288",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Robert Tibshirani. 1994. Regression Shrinkage and Se- lection Via the Lasso. Journal of the Royal Statistical Society, Series B, 58:267-288.",
"links": null
},
"BIBREF17": {
"ref_id": "b17",
"title": "News from OPUS -A Collection of Multilingual Parallel Corpora with Tools and Interfaces",
"authors": [
{
"first": "J\u00f6rg",
"middle": [],
"last": "Tiedemann",
"suffix": ""
}
],
"year": 2009,
"venue": "Recent Advances in Natural Language Processing, volume V",
"volume": "",
"issue": "",
"pages": "237--248",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "J\u00f6rg Tiedemann. 2009. News from OPUS -A Collection of Multilingual Parallel Corpora with Tools and Inter- faces. In Recent Advances in Natural Language Pro- cessing, volume V, pages 237-248. John Benjamins, Amsterdam/Philadelphia, Borovets, Bulgaria.",
"links": null
},
"BIBREF18": {
"ref_id": "b18",
"title": "Learning Performance of a Machine Translation System: a Statistical and Computational Analysis",
"authors": [
{
"first": "Marco",
"middle": [],
"last": "Turchi",
"suffix": ""
},
{
"first": "Tijl",
"middle": [],
"last": "De Bie",
"suffix": ""
},
{
"first": "Nello",
"middle": [],
"last": "Cristianini",
"suffix": ""
}
],
"year": 2008,
"venue": "Proceedings of the Third Workshop on Statistical Machine Translation",
"volume": "",
"issue": "",
"pages": "35--43",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Marco Turchi, Tijl De Bie, and Nello Cristianini. 2008. Learning Performance of a Machine Translation Sys- tem: a Statistical and Computational Analysis. In Pro- ceedings of the Third Workshop on Statistical Machine Translation, pages 35-43, Columbus, Ohio, June. As- sociation for Computational Linguistics.",
"links": null
}
},
"ref_entries": {
"FIGREF0": {
"type_str": "figure",
"text": "Inferred learning curve for English-Japanese test set. The error-bars show the anchor confidence for the predictions.",
"uris": null,
"num": null
},
"FIGREF1": {
"type_str": "figure",
"text": "Figure 4.",
"uris": null,
"num": null
},
"FIGREF2": {
"type_str": "figure",
"text": "Predicted curves in the three scenarios for Czech-English test set using the Lasso model",
"uris": null,
"num": null
},
"TABREF0": {
"text": "Curve families.",
"num": null,
"type_str": "table",
"html": null,
"content": "<table/>"
},
"TABREF1": {
"text": "). 2",
"num": null,
"type_str": "table",
"html": null,
"content": "<table><tr><td>Domain</td><td>Source Language</td><td>Target Language</td><td># Test sets</td></tr><tr><td>Europarl (Koehn, 2005)</td><td>Fr, De, Es En</td><td>En Fr, De, Es</td><td>4</td></tr><tr><td>KFTT</td><td/><td/><td/></tr></table>"
},
"TABREF2": {
"text": "The translation systems used for the curve fitting experiments, comprising 30 language-pair and domain combinations for a total of 96 learning curves.",
"num": null,
"type_str": "table",
"html": null,
"content": "<table><tr><td>Language codes: Cz=Czech, Da=Danish, En=English,</td></tr><tr><td>De=German, Fr=French, Jp=Japanese, Es=Spanish</td></tr></table>"
},
"TABREF4": {
"text": "Evaluation of the goodness of fit for the six families.",
"num": null,
"type_str": "table",
"html": null,
"content": "<table/>"
},
"TABREF6": {
"text": "Root mean squared error of the linear regression models for each anchor size",
"num": null,
"type_str": "table",
"html": null,
"content": "<table/>"
},
"TABREF8": {
"text": "Root mean squared error of the combined curves at the three anchor sizes",
"num": null,
"type_str": "table",
"html": null,
"content": "<table/>"
}
}
}
} |