{
"paper_id": "P13-1036",
"header": {
"generated_with": "S2ORC 1.0.0",
"date_generated": "2023-01-19T09:35:25.969117Z"
},
"title": "Scalable Decipherment for Machine Translation via Hash Sampling",
"authors": [
{
"first": "Sujith",
"middle": [],
"last": "Ravi",
"suffix": "",
"affiliation": {},
"email": "sravi@gooogle.com"
}
],
"year": "",
"venue": null,
"identifiers": {},
"abstract": "In this paper, we propose a new Bayesian inference method to train statistical machine translation systems using only nonparallel corpora. Following a probabilistic decipherment approach, we first introduce a new framework for decipherment training that is flexible enough to incorporate any number/type of features (besides simple bag-of-words) as side-information used for estimating translation models. In order to perform fast, efficient Bayesian inference in this framework, we then derive a hash sampling strategy that is inspired by the work of Ahmed et al. (2012). The new translation hash sampler enables us to scale elegantly to complex models (for the first time) and large vocabulary/corpora sizes. We show empirical results on the OPUS data-our method yields the best BLEU scores compared to existing approaches, while achieving significant computational speedups (several orders faster). We also report for the first time-BLEU score results for a largescale MT task using only non-parallel data (EMEA corpus).",
"pdf_parse": {
"paper_id": "P13-1036",
"_pdf_hash": "",
"abstract": [
{
"text": "In this paper, we propose a new Bayesian inference method to train statistical machine translation systems using only nonparallel corpora. Following a probabilistic decipherment approach, we first introduce a new framework for decipherment training that is flexible enough to incorporate any number/type of features (besides simple bag-of-words) as side-information used for estimating translation models. In order to perform fast, efficient Bayesian inference in this framework, we then derive a hash sampling strategy that is inspired by the work of Ahmed et al. (2012). The new translation hash sampler enables us to scale elegantly to complex models (for the first time) and large vocabulary/corpora sizes. We show empirical results on the OPUS data-our method yields the best BLEU scores compared to existing approaches, while achieving significant computational speedups (several orders faster). We also report for the first time-BLEU score results for a largescale MT task using only non-parallel data (EMEA corpus).",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Abstract",
"sec_num": null
}
],
"body_text": [
{
"text": "Statistical machine translation (SMT) systems these days are built using large amounts of bilingual parallel corpora. The parallel corpora are used to estimate translation model parameters involving word-to-word translation tables, fertilities, distortion, phrase translations, syntactic transformations, etc. But obtaining parallel data is an expensive process and not available for all language pairs or domains. On the other hand, monolingual data (in written form) exists and is easier to obtain for many languages. Learning translation models from monolingual corpora could help address the challenges faced by modern-day MT systems, especially for low resource language pairs. Recently, this topic has been receiving increasing attention from researchers and new methods have been proposed to train statistical machine translation models using only monolingual data in the source and target language. The underlying motivation behind most of these methods is that statistical properties for linguistic elements are shared across different languages and some of these similarities (mappings) could be automatically identified from large amounts of monolingual data.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Introduction",
"sec_num": "1"
},
{
"text": "The MT literature does cover some prior work on extracting or augmenting partial lexicons using non-parallel corpora (Rapp, 1995; Fung and McKeown, 1997; Koehn and Knight, 2000; Haghighi et al., 2008) . However, none of these methods attempt to train end-to-end MT models, instead they focus on mining bilingual lexicons from monolingual corpora and often they require parallel seed lexicons as a starting point. Some of them (Haghighi et al., 2008 ) also rely on additional linguistic knowledge such as orthography, etc. to mine word translation pairs across related languages (e.g., Spanish/English). Unsupervised training methods have also been proposed in the past for related problems in decipherment (Knight and Yamada, 1999; Snyder et al., 2010; Ravi and Knight, 2011a) where the goal is to decode unknown scripts or ciphers.",
"cite_spans": [
{
"start": 117,
"end": 129,
"text": "(Rapp, 1995;",
"ref_id": "BIBREF14"
},
{
"start": 130,
"end": 153,
"text": "Fung and McKeown, 1997;",
"ref_id": "BIBREF4"
},
{
"start": 154,
"end": 177,
"text": "Koehn and Knight, 2000;",
"ref_id": "BIBREF10"
},
{
"start": 178,
"end": 200,
"text": "Haghighi et al., 2008)",
"ref_id": "BIBREF6"
},
{
"start": 426,
"end": 448,
"text": "(Haghighi et al., 2008",
"ref_id": "BIBREF6"
},
{
"start": 706,
"end": 731,
"text": "(Knight and Yamada, 1999;",
"ref_id": "BIBREF9"
},
{
"start": 732,
"end": 752,
"text": "Snyder et al., 2010;",
"ref_id": "BIBREF18"
},
{
"start": 753,
"end": 776,
"text": "Ravi and Knight, 2011a)",
"ref_id": "BIBREF15"
}
],
"ref_spans": [],
"eq_spans": [],
"section": "Introduction",
"sec_num": "1"
},
{
"text": "The body of work that is more closely related to ours include that of Ravi and Knight (2011b) who introduced a decipherment approach for training translation models using only monolingual cor-pora. Their best performing method uses an EM algorithm to train a word translation model and they show results on a Spanish/English task. Nuhn et al. (2012) extend the former approach and improve training efficiency by pruning translation candidates prior to EM training with the help of context similarities computed from monolingual corpora.",
"cite_spans": [
{
"start": 70,
"end": 93,
"text": "Ravi and Knight (2011b)",
"ref_id": "BIBREF16"
},
{
"start": 331,
"end": 349,
"text": "Nuhn et al. (2012)",
"ref_id": "BIBREF12"
}
],
"ref_spans": [],
"eq_spans": [],
"section": "Introduction",
"sec_num": "1"
},
{
"text": "In this work we propose a new Bayesian inference method for estimating translation models from scratch using only monolingual corpora. Secondly, we introduce a new feature-based representation for sampling translation candidates that allows one to incorporate any amount of additional features (beyond simple bag-of-words) as sideinformation during decipherment training. Finally, we also derive a new accelerated sampling mechanism using locality sensitive hashing inspired by recent work on fast, probabilistic inference for unsupervised clustering (Ahmed et al., 2012) . The new sampler allows us to perform fast, efficient inference with more complex translation models (than previously used) and scale better to large vocabulary and corpora sizes compared to existing methods as evidenced by our experimental results on two different corpora.",
"cite_spans": [
{
"start": 551,
"end": 571,
"text": "(Ahmed et al., 2012)",
"ref_id": "BIBREF0"
}
],
"ref_spans": [],
"eq_spans": [],
"section": "Introduction",
"sec_num": "1"
},
{
"text": "We now describe the decipherment problem formulation for machine translation. Problem Formulation: Given a source text f (i.e., source word sequences f 1 ...f m ) and a monolingual target language corpus, our goal is to decipher the source text and produce a target translation. Contrary to standard machine translation training scenarios, here we have to estimate the translation model P \u03b8 (f |e) parameters using only monolingual data. During decipherment training, our objective is to estimate the model parameters in order to maximize the probability of the source text f as suggested by Ravi and Knight (2011b) . ",
"cite_spans": [
{
"start": 592,
"end": 615,
"text": "Ravi and Knight (2011b)",
"ref_id": "BIBREF16"
}
],
"ref_spans": [],
"eq_spans": [],
"section": "Decipherment Model for Machine Translation",
"sec_num": "2"
},
{
"text": "For P (e), we use a word n-gram language model (LM) trained on monolingual target text. We then estimate the parameters of the translation model P \u03b8 (f |e) during training.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Decipherment Model for Machine Translation",
"sec_num": "2"
},
{
"text": "Translation Model: Machine translation is a much more complex task than solving other decipherment tasks such as word substitution ciphers (Ravi and Knight, 2011b; Dou and Knight, 2012) . The mappings between languages involve non-determinism (i.e., words can have multiple translations), re-ordering of words can occur as grammar and syntax varies with language, and in addition word insertion and deletion operations are also involved.",
"cite_spans": [
{
"start": 139,
"end": 163,
"text": "(Ravi and Knight, 2011b;",
"ref_id": "BIBREF16"
},
{
"start": 164,
"end": 185,
"text": "Dou and Knight, 2012)",
"ref_id": "BIBREF3"
}
],
"ref_spans": [],
"eq_spans": [],
"section": "Decipherment Model for Machine Translation",
"sec_num": "2"
},
{
"text": "Ideally, for the translation model P (f |e) we would like to use well-known statistical models such as IBM Model 3 and estimate its parameters \u03b8 using the EM algorithm (Dempster et al., 1977) . But training becomes intractable with complex translation models and scalability is also an issue when large corpora sizes are involved and the translation tables become huge to fit in memory. So, instead we use a simplified generative process for the translation model as proposed by Ravi and Knight (2011b) and used by others (Nuhn et al., 2012) for this task:",
"cite_spans": [
{
"start": 168,
"end": 191,
"text": "(Dempster et al., 1977)",
"ref_id": "BIBREF2"
},
{
"start": 479,
"end": 502,
"text": "Ravi and Knight (2011b)",
"ref_id": "BIBREF16"
},
{
"start": 522,
"end": 541,
"text": "(Nuhn et al., 2012)",
"ref_id": "BIBREF12"
}
],
"ref_spans": [],
"eq_spans": [],
"section": "Decipherment Model for Machine Translation",
"sec_num": "2"
},
{
"text": "1. Generate a target (e.g., English) string e = e 1 ...e l , with probability P (e) according to an n-gram language model.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Decipherment Model for Machine Translation",
"sec_num": "2"
},
{
"text": "2. Insert a NULL word at any position in the English string, with uniform probability.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Decipherment Model for Machine Translation",
"sec_num": "2"
},
{
"text": "3. For each target word token e i (including NULLs), choose a source word translation f i , with probability P \u03b8 (f i |e i ). The source word may be NULL.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Decipherment Model for Machine Translation",
"sec_num": "2"
},
{
"text": "4. Swap any pair of adjacent source words f i\u22121 , f i , with probability P (swap); set to 0.1.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Decipherment Model for Machine Translation",
"sec_num": "2"
},
{
"text": "f = f 1 ...f m , skip- ping over NULLs.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Output the foreign string",
"sec_num": "5."
},
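{
"text": "To make the generative story above concrete, the following minimal Python sketch (an illustration, not code from the paper; the translation table p_theta and the helper sample_from are hypothetical stand-ins) walks through steps 2-5 for a single pre-generated target sentence:\nimport random\n\ndef sample_from(dist):\n    # dist: {word: probability}; simple inverse-CDF draw\n    r, acc = random.random(), 0.0\n    for w, p in dist.items():\n        acc += p\n        if r < acc:\n            return w\n    return w\n\ndef generate_source(e_tokens, p_theta, p_swap=0.1):\n    # Step 2: insert a NULL word at a uniformly random position\n    e = list(e_tokens)\n    e.insert(random.randrange(len(e) + 1), 'NULL')\n    # Step 3: translate each target token (the draw may itself be a source-side NULL)\n    f = [sample_from(p_theta[t]) for t in e]\n    # Step 4: sweep once, swapping adjacent source words with probability p_swap = 0.1\n    for i in range(1, len(f)):\n        if random.random() < p_swap:\n            f[i - 1], f[i] = f[i], f[i - 1]\n    # Step 5: output the foreign string, skipping over NULLs\n    return [w for w in f if w != 'NULL']\nStep 1 (drawing e from the n-gram language model) is assumed to have happened upstream; e_tokens is its output.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Output the foreign string",
"sec_num": "5."
},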
{
"text": "Previous approaches (Ravi and Knight, 2011b; Nuhn et al., 2012) use the EM algorithm to estimate all the parameters \u03b8 in order to maximize likelihood of the foreign corpus. Instead, we propose a new Bayesian inference framework to estimate the translation model parameters. In spite of using Bayesian inference which is typically slow in practice (with standard Gibbs sampling), we show later that our method is scalable and permits decipherment training using more complex translation models (with several additional parameters).",
"cite_spans": [
{
"start": 20,
"end": 44,
"text": "(Ravi and Knight, 2011b;",
"ref_id": "BIBREF16"
},
{
"start": 45,
"end": 63,
"text": "Nuhn et al., 2012)",
"ref_id": "BIBREF12"
}
],
"ref_spans": [],
"eq_spans": [],
"section": "Output the foreign string",
"sec_num": "5."
},
{
"text": "We now extend the generative process (described earlier) to more complex translation models. Non-local Re-ordering: The generative process described earlier limits re-ordering to local or adjacent word pairs in a source sentence. We extend this to allow re-ordering between any pair of words in the sentence. Fertility: We also add a fertility model P \u03b8 f ert to the translation model using the formula:",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Adding Phrases, Flexible Reordering and Fertility to Translation Model",
"sec_num": "2.1"
},
{
"text": "EQUATION",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [
{
"start": 0,
"end": 8,
"text": "EQUATION",
"ref_id": "EQREF",
"raw_str": "P \u03b8 f ert = i n \u03b8 (\u03c6 i |e i ) \u2022 p \u03c6 0 1 (2) n \u03b8 (\u03c6 i |e i ) = \u03b1 f ert \u2022 P 0 (\u03c6 i |e i ) + C \u2212i (e i , \u03c6 i ) \u03b1 f ert + C \u2212i (e i )",
"eq_num": "(3)"
}
],
"section": "Adding Phrases, Flexible Reordering and Fertility to Translation Model",
"sec_num": "2.1"
},
{
"text": "where, P 0 represents the base distribution (which is set to uniform) in a Chinese Restaurant Process (CRP) 1 for the fertility model and C \u2212i represents the count of events occurring in the history excluding the observation at position i. \u03c6 i is the number of source words aligned to (i.e., generated by) the target word e i . We use sparse Dirichlet priors for all the translation model components. 2 \u03c6 0 represents the target NULL word fertility and p 1 is the insertion probability which is fixed to 0.1. In addition, we set a maximum threshold for fertility values \u03c6 i \u2264 \u03b3 \u2022 m, where m is the length of the source sentence. This discourages a particular target word (e.g., NULL word) from generating too many source words in the same sentence. In our experiments, we set \u03b3 = 0.3. We enforce this constraint in the training process during sampling. 3 Modeling Phrases: Finally, we extend the translation candidate set in P \u03b8 (f i |e i ) to model phrases in addition to words for the target side (i.e., e i can now be a word or a phrase 4 previously seen in the monolingual target corpus). This greatly increases the training time since in each sampling step, we now have many more e i candidates to choose from. In Section 4, we describe how we deal with this problem by using a fast, efficient sampler based on hashing that allows us to speed up the Bayesian inference significantly whereas standard Gibbs sampling would be extremely slow.",
"cite_spans": [
{
"start": 853,
"end": 854,
"text": "3",
"ref_id": null
}
],
"ref_spans": [],
"eq_spans": [],
"section": "Adding Phrases, Flexible Reordering and Fertility to Translation Model",
"sec_num": "2.1"
},
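{
"text": "A small sketch of the collapsed estimate in Equation 3 (for illustration only; the count dictionaries and the uniform base distribution over a maximum fertility value are hypothetical names):\ndef fertility_prob(phi, e, joint_counts, e_counts, alpha_fert=0.01, max_fert=10):\n    # Equation (3): CRP posterior predictive for fertility phi of target word e,\n    # with counts C^{-i} taken over all other events in the current sample\n    p0 = 1.0 / max_fert  # uniform base distribution P_0\n    num = alpha_fert * p0 + joint_counts.get((e, phi), 0)\n    den = alpha_fert + e_counts.get(e, 0)\n    return num / den\nIn the sampler, joint_counts and e_counts are decremented for the current observation before this call and incremented again after a new value is drawn, which is what makes the estimate leave-one-out.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Adding Phrases, Flexible Reordering and Fertility to Translation Model",
"sec_num": "2.1"
},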
{
"text": "Source and Target",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Feature-based representation for",
"sec_num": "3"
},
{
"text": "The model described in the previous section while being flexible in describing the translation process, poses several challenges for training. As the source and target vocabulary sizes increase the size of the translation table (|V f | \u2022 |V e |) increases significantly and often becomes too huge to fit in memory. Additionally, performing Bayesian inference with such a complex model using standard Gibbs sampling can be very slow in practice. Here, we describe a new method for doing Bayesian inference by first introducing a featurebased representation for the source and target words (or phrases) from which we then derive a novel proposal distribution for sampling translation candidates. We represent both source and target words in a vector space similar to how documents are represented in typical information retrieval settings. But unlike documents, here each word w is associated with a feature vector w 1 ...w d (where w i represents the weight for the feature indexed by i) which is constructed from monolingual corpora. For instance, context features for word w may include other words (or phrases) that appear in the immediate context (n-gram window) surrounding w in the monolingual corpus. Similarly, we can add other features based on topic models, orthography (Haghighi et al., 2008) , temporal (Klementiev et al., 2012), etc. to our representation all of which can be extracted from monolingual corpora.",
"cite_spans": [
{
"start": 1279,
"end": 1302,
"text": "(Haghighi et al., 2008)",
"ref_id": "BIBREF6"
}
],
"ref_spans": [],
"eq_spans": [],
"section": "Feature-based representation for",
"sec_num": "3"
},
{
"text": "Next, given two high dimensional vectors u and v it is possible to calculate the similarity between the two words denoted by s(u, v). The feature construction process is described in more detail below: Target Language: We represent each word (or phrase) e i with the following contextual features along with their counts: (a) f \u2212context : every (word n-gram, position) pair immediately preceding e i in the monolingual corpus (n=1, position=\u22121), (b) similar features f +context to model the context following e i , and (c) we also throw in generic context features f scontext without position informationevery word that co-occurs with e i in the same sen-tence. While the two position-features provide specific context information (may be sparse for large monolingual corpora), this feature is more generic and captures long-distance co-occurrence statistics. Source Language: Words appearing in a source sentence f are represented using the corresponding target translation e = e 1 ...e m generated for f in the current sample during training. For each source word f j \u2208 f , we look at the corresponding word e j in the target translation. We then extract all the context features of e j in the target translation sample sentence e and add these features (f \u2212context , f +context , f scontext ) with weights to the feature representation for f j .",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Feature-based representation for",
"sec_num": "3"
},
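{
"text": "The feature extraction itself is straightforward; the sketch below (for illustration, assuming tokenized sentences) collects the three feature templates for one target word:\nfrom collections import Counter\n\ndef target_features(sentences, word):\n    feats = Counter()\n    for sent in sentences:\n        for i, w in enumerate(sent):\n            if w != word:\n                continue\n            # f_-context: the unigram immediately preceding the word (n=1, position=-1)\n            if i > 0:\n                feats[('-ctx', sent[i - 1])] += 1\n            # f_+context: the unigram immediately following the word\n            if i + 1 < len(sent):\n                feats[('+ctx', sent[i + 1])] += 1\n            # f_scontext: every other word co-occurring in the same sentence\n            for j, other in enumerate(sent):\n                if j != i:\n                    feats[('sctx', other)] += 1\n    return feats\nSource-word vectors are built with the same templates, but over the target translation sampled in the current training iteration rather than over a static corpus.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Feature-based representation for",
"sec_num": "3"
},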
{
"text": "Unlike the target word feature vectors (which can be pre-computed from the monolingual target corpus), the feature vector for every source word f j is dynamically constructed from the target translation sampled in each training iteration. This is a key distinction of our framework compared to previous approaches that use contextual similarity (or any other) features constructed from static monolingual corpora (Rapp, 1995; Koehn and Knight, 2000; Nuhn et al., 2012) .",
"cite_spans": [
{
"start": 413,
"end": 425,
"text": "(Rapp, 1995;",
"ref_id": "BIBREF14"
},
{
"start": 426,
"end": 449,
"text": "Koehn and Knight, 2000;",
"ref_id": "BIBREF10"
},
{
"start": 450,
"end": 468,
"text": "Nuhn et al., 2012)",
"ref_id": "BIBREF12"
}
],
"ref_spans": [],
"eq_spans": [],
"section": "Feature-based representation for",
"sec_num": "3"
},
{
"text": "Note that as we add more and more features for a particular word (by training on larger monolingual corpora or adding new types of features, etc.), it results in the feature representation becoming more sparse (especially for source feature vectors) which can cause problems in efficiency as well as robustness when computing similarity against other vectors. In the next section, we will describe how we mitigate this problem by projecting into a low-dimensional space by computing hash signatures.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Feature-based representation for",
"sec_num": "3"
},
{
"text": "In all our experiments, we only use the features described above for representing source and target words. We note that the new sampling framework is easily extensible to many additional feature types (for example, monolingual topic model features, etc.) which can be efficiently handled by our inference algorithm and could further improve translation performance but we leave this for future work.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Feature-based representation for",
"sec_num": "3"
},
{
"text": "The next step is to use the feature representations described earlier and iteratively sample a target word (or phrase) translation candidate e i for every word f i in the source text f . This involves choosing from |V e | possible target candidates in every step which can be highly inefficient (and infeasible for large vocabulary sizes). One possible strategy is to compute similarity scores s(w f i , w e ) between the current source word feature vector w f i and feature vectors w e \u2208Ve for all possible candidates in the target vocabulary. Following this, we can prune the translation candidate set by keeping only the top candidates e * according to the similarity scores. Nuhn et al. 2012 ",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Bayesian MT Decipherment via Hash Sampling",
"sec_num": "4"
},
{
"text": "e | \u2022 |V f | distances in time O(V 2 \u2022 log(V )), where V = max(|V e |, |V f |).",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Bayesian MT Decipherment via Hash Sampling",
"sec_num": "4"
},
{
"text": "Challenges: Unfortunately, there are several additional challenges which makes inference very hard in our case. Firstly, we would like to include as many features as possible to represent the source/target words in our framework besides simple bag-of-words context similarity (for example, left-context, right-context, and other generalpurpose features based on topic models, etc.). This makes the complexity far worse (in practice) since the dimensionality of the feature vectors d is a much higher value than |V e |. Computing similarity scores alone (na\u00efvely) would incur O(|V e | \u2022 d) time which is prohibitively huge since we have to do this for every token in the source language corpus. Secondly, for Bayesian inference we need to sample from a distribution that involves computing probabilities for all the components (language model, translation model, fertility, etc.) described in Equation 1. This distribution needs to be computed for every source word token f i in the corpus, for all possible candidates e i \u2208 V e and the process has to be repeated for multiple sampling iterations (typically more than 1000). Doing standard collapsed Gibbs sampling in this scenario would be very slow and intractable.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Bayesian MT Decipherment via Hash Sampling",
"sec_num": "4"
},
{
"text": "We now present an alternative fast, efficient inference strategy that overcomes many of the challenges described above and helps accelerate the sampling process significantly. First, we set our translation models within the context of a more generic and widely known family of distributions-mixtures of exponential families. Then we derive a novel proposal distribution for sampling translation candidates and introduce a new sampler for decipherment training that is based on locality sensitive hashing (LSH).",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Bayesian MT Decipherment via Hash Sampling",
"sec_num": "4"
},
{
"text": "Hashing methods such as LSH have been widely used in the past in several scenarios including NLP applications (Ravichandran et al., 2005) . Most of these approaches employ LSH within heuristic methods for speeding up nearestneighbor look up and similarity computation techniques. However, we use LSH hashing within a probabilistic framework which is very different from the typical use of LSH.",
"cite_spans": [
{
"start": 110,
"end": 137,
"text": "(Ravichandran et al., 2005)",
"ref_id": "BIBREF17"
}
],
"ref_spans": [],
"eq_spans": [],
"section": "Bayesian MT Decipherment via Hash Sampling",
"sec_num": "4"
},
{
"text": "Our work is inspired by some recent work by Ahmed et al. (2012) on speeding up Bayesian inference for unsupervised clustering. We use a similar technique as theirs but a different approximate distribution for the proposal, one that is bettersuited for machine translation models and without some of the additional overhead required for computing certain terms in the original formulation.",
"cite_spans": [
{
"start": 44,
"end": 63,
"text": "Ahmed et al. (2012)",
"ref_id": "BIBREF0"
}
],
"ref_spans": [],
"eq_spans": [],
"section": "Bayesian MT Decipherment via Hash Sampling",
"sec_num": "4"
},
{
"text": "The translation models described earlier (Section 2) can be represented as mixtures of exponential families, specifically mixtures of multinomials. In exponential families, distributions over random variables are given by:",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Mixtures of Exponential Families:",
"sec_num": null
},
{
"text": "EQUATION",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [
{
"start": 0,
"end": 8,
"text": "EQUATION",
"ref_id": "EQREF",
"raw_str": "p(x; \u03b8) = exp( \u03c6(x), \u03b8 ) \u2212 g(\u03b8)",
"eq_num": "(4)"
}
],
"section": "Mixtures of Exponential Families:",
"sec_num": null
},
{
"text": "where, \u03c6 : X \u2192 F is a map from x to the space of sufficient statistics and \u03b8 \u2208 F. The term g(\u03b8) ensures that p(x; \u03b8) is properly normalized. X is the domain of observations X = x 1 , ..., x m drawn from some distribution p. Our goal is to estimate p. In our case, this refers to the translation model from Equation 1.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Mixtures of Exponential Families:",
"sec_num": null
},
{
"text": "We also choose corresponding conjugate Dirichlet distributions for priors which have the property that the posterior distribution p(\u03b8|X) over \u03b8 remains in the same family as p(\u03b8).",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Mixtures of Exponential Families:",
"sec_num": null
},
{
"text": "Note that the (translation) model in our case consists of multiple exponential families components-a multinomial pertaining to the language model (which remains fixed 5 ), and other components pertaining to translation probabilities P \u03b8 (f i |e i ), fertility P \u03b8 f ert , etc. To do collapsed Gibbs sampling under this model, we would perform the following steps during sampling: 1. For a given source word token f i draw target translation",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Mixtures of Exponential Families:",
"sec_num": null
},
{
"text": "e i \u223c p(e i |F, E \u2212i ) \u221d p(e) \u2022 p(f i |e i , F \u2212i , E \u2212i ) \u2022 p f ert (\u2022|e i , F \u2212i , E \u2212i ) \u2022 ... (5)",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Mixtures of Exponential Families:",
"sec_num": null
},
{
"text": "where, F is the full source text and E the full target translation generated during sampling. 2. Update the sufficient statistics for the changed target translation assignments.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Mixtures of Exponential Families:",
"sec_num": null
},
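{
"text": "A naive implementation of this sampling step makes the cost plain: every candidate in V_e must be scored before a single draw can be made. The sketch below is for illustration only (the model object with lm, trans and fert components is a hypothetical stand-in for the factors in Equation 5):\nimport numpy as np\n\ndef collapsed_gibbs_step(i, f, e, model, Ve, rng):\n    # Equation (5): unnormalized posterior over all candidates e' in V_e\n    scores = np.array([\n        model.lm(e, i, cand) * model.trans(f[i], cand) * model.fert(e, i, cand)\n        for cand in Ve\n    ])\n    probs = scores / scores.sum()\n    e[i] = Ve[rng.choice(len(Ve), p=probs)]\nThis is O(|V_e|) work per source token per iteration, which is exactly the cost the hash sampler described next avoids.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Mixtures of Exponential Families:",
"sec_num": null
},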
{
"text": "For large target vocabularies, computing p(f i |e i , F \u2212i , E \u2212i ) dominates the inference procedure. We can accelerate this step significantly using a good proposal distribution via hashing. Locality Sensitive Hash Sampling: For general exponential families, here is a Taylor approximation for the data likelihood term (Ahmed et al., 2012) :",
"cite_spans": [
{
"start": 321,
"end": 341,
"text": "(Ahmed et al., 2012)",
"ref_id": "BIBREF0"
}
],
"ref_spans": [],
"eq_spans": [],
"section": "Mixtures of Exponential Families:",
"sec_num": null
},
{
"text": "EQUATION",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [
{
"start": 0,
"end": 8,
"text": "EQUATION",
"ref_id": "EQREF",
"raw_str": "p(x|\u2022) \u2248 exp( \u03c6(x), \u03b8 * ) \u2212 g(\u03b8 * )",
"eq_num": "(6)"
}
],
"section": "Mixtures of Exponential Families:",
"sec_num": null
},
{
"text": "where, \u03b8 * is the expected parameter (sufficient statistics). For sampling the translation model, this involves computing an expensive inner product \u03c6(f i ), \u03b8 * e for each source word f i which has to be repeated for every translation candidate e , including candidates that have very low probabilities and are unlikely to be chosen as the translation for f j .",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Mixtures of Exponential Families:",
"sec_num": null
},
{
"text": "So, during decipherment training a standard collapsed Gibbs sampler will waste most of its time on expensive computations that will be discarded in the end anyways. Also, unlike some standard generative models used in other unsupervised learning scenarios (e.g., clustering) that model only observed features (namely words appearing in the document), here we would like to enrich the translation model with a lot more features (side-information).",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Mixtures of Exponential Families:",
"sec_num": null
},
{
"text": "Instead, we can accelerate the computation of the inner product \u03c6(f i ), \u03b8 * e using a hash sampling strategy similar to (Ahmed et al., 2012) . The underlying idea here is to use binary hashing (Charikar, 2002) to explore only those candidates e that are sufficiently close to the best matching translation via a proposal distribution. Next, we briefly introduce some notations and existing theoretical results related to binary hashing before describing the hash sampling procedure.",
"cite_spans": [
{
"start": 121,
"end": 141,
"text": "(Ahmed et al., 2012)",
"ref_id": "BIBREF0"
},
{
"start": 194,
"end": 210,
"text": "(Charikar, 2002)",
"ref_id": "BIBREF1"
}
],
"ref_spans": [],
"eq_spans": [],
"section": "Mixtures of Exponential Families:",
"sec_num": null
},
{
"text": "EQUATION",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [
{
"start": 0,
"end": 8,
"text": "EQUATION",
"ref_id": "EQREF",
"raw_str": "u, v \u2208 R n , u, v = u \u2022 v \u2022 cos (u, v)",
"eq_num": "(7)"
}
],
"section": "For any two vectors",
"sec_num": null
},
{
"text": "(u, v) = \u03c0P r{sgn[ u, w ] = sgn[ v, w ]} (8) where, w is a random vector drawn from a symmetric spherical distribution and the term inside P r{\u2022} represents the relation between the signs of the two inner products.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "For any two vectors",
"sec_num": null
},
{
"text": "Let",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "For any two vectors",
"sec_num": null
},
{
"text": "h l (v) \u2208 {0, 1} l be an l-bit binary hash of v where: [h l (v)] i := sgn[ v, w i ]; w i \u223c U m .",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "For any two vectors",
"sec_num": null
},
{
"text": "Then the probability of matching signs is given by:",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "For any two vectors",
"sec_num": null
},
{
"text": "EQUATION",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [
{
"start": 0,
"end": 8,
"text": "EQUATION",
"ref_id": "EQREF",
"raw_str": "z l (u, v) := 1 l h(u) \u2212 h(v) 1",
"eq_num": "(9)"
}
],
"section": "For any two vectors",
"sec_num": null
},
{
"text": "So, z l (u, v) measures how many bits differ between the hash vectors h(u) and h(v) associated with u, v. Combining this with Equations 6 and 7 we can estimate the unnormalized log-likelihood of a source word f i being translated as target e via:",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "For any two vectors",
"sec_num": null
},
{
"text": "EQUATION",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [
{
"start": 0,
"end": 8,
"text": "EQUATION",
"ref_id": "EQREF",
"raw_str": "s l (f i , e ) \u221d \u03b8 e \u2022 \u03c6(f i ) \u2022 cos \u03c0z l (\u03c6(f i ), \u03b8 e )",
"eq_num": "(10"
}
],
"section": "For any two vectors",
"sec_num": null
},
{
"text": ") For each source word f i , we now sample from this new distribution (after normalization) instead of the original one. The binary hash representation for the two vectors yield significant speedups during sampling since Hamming distance computation between h(u) and h(v) is highly optimized on modern CPUs. Hence, we can compute an estimate for the inner product quite efficiently. 6 Updating the hash signatures: During training, we compute the target candidate projection h(\u03b8 e ) and corresponding norm only once 7 which is different from the setup of Ahmed et al. (2012) . The source word projection \u03c6(f i ) is dynamically updated in every sampling step. Note that doing this na\u00efvely would scale slowly as O(Dl) where D is the total number of features but instead we can update the hash signatures in a more efficient manner that scales as O(D i >0 l) where D i >0 is the number of non-zero entries in the feature representation for the source word \u03c6(f i ). Also, we do not need to store the random vectors w in practice since these can be computed on the fly using hash functions. The inner product approximation also yields some theoretical guarantees for the hash sampler. 8",
"cite_spans": [
{
"start": 383,
"end": 384,
"text": "6",
"ref_id": null
},
{
"start": 555,
"end": 574,
"text": "Ahmed et al. (2012)",
"ref_id": "BIBREF0"
}
],
"ref_spans": [],
"eq_spans": [],
"section": "For any two vectors",
"sec_num": null
},
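{
"text": "Putting Equations 7-10 together, here is a minimal sketch of the signature computation and the hash-based score (for illustration; numpy is assumed and the helper names are ours):\nimport numpy as np\n\ndef signatures(X, W):\n    # l-bit sign hashes (Charikar, 2002): [h(v)]_i = sgn(<v, w_i>)\n    # X: (n, d) feature vectors; W: (l, d) random Gaussian directions\n    return (X @ W.T) >= 0\n\ndef hash_scores(phi_f, sig_f, target_sigs, target_norms):\n    # z_l: fraction of differing bits between source and target signatures (Eq. 9)\n    z = np.mean(sig_f[None, :] != target_sigs, axis=1)\n    # Equation (10): s_l(f, e') ~ ||theta_e'|| * ||phi(f)|| * cos(pi * z_l)\n    return target_norms * np.linalg.norm(phi_f) * np.cos(np.pi * z)\n\ndef proposal(scores):\n    # q(e') ~ exp(s_l), computed stably; used in Section 4.1\n    p = np.exp(scores - scores.max())\n    return p / p.sum()\nHere target_sigs and target_norms are precomputed once for all candidates, while sig_f = signatures(phi_f[None, :], W)[0] is refreshed whenever the source feature vector changes; the bit comparison is where the Hamming-distance speedup comes from.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "For any two vectors",
"sec_num": null
},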
{
"text": "In each sampling step, we use the distribution from Equation 10 as a proposal distribution in a Metropolis Hastings scheme to sample target translations for each source word.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Metropolis Hastings",
"sec_num": "4.1"
},
{
"text": "Once a new target translation e is sampled for source word f i from the proposal distribution q(\u2022) \u221d exp s l (f i ,e ) , we accept the proposal (and update the corresponding hash signatures) according to the probability r",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Metropolis Hastings",
"sec_num": "4.1"
},
{
"text": "EQUATION",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [
{
"start": 0,
"end": 8,
"text": "EQUATION",
"ref_id": "EQREF",
"raw_str": "r = q(e old i ) \u2022 p new (\u2022) q(e new i ) \u2022 p old (\u2022)",
"eq_num": "(11)"
}
],
"section": "Metropolis Hastings",
"sec_num": "4.1"
},
{
"text": "where, p old (\u2022), p new (\u2022) are the true conditional likelihood probabilities according to our model (including the language model component) for the old, new sample respectively.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Metropolis Hastings",
"sec_num": "4.1"
},
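{
"text": "In code, the correction step is a few lines (a sketch; the q and p values come from the proposal distribution and the full model, respectively):\nimport random\n\ndef mh_accept(q_old, q_new, p_old, p_new):\n    # Equation (11): r = (q(e_old) * p_new) / (q(e_new) * p_old); accept if u < min(1, r)\n    r = (q_old * p_new) / (q_new * p_old)\n    return random.random() < min(1.0, r)\nThe expensive exact model probabilities (including the language model term) are thus evaluated only for the sampled candidate, not for every candidate in V_e.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Metropolis Hastings",
"sec_num": "4.1"
},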
{
"text": "Putting together all the pieces described in the previous section, we perform the following steps: 1. Initialization: We initialize the starting sample as follows: for each source word token, randomly sample a target word. If the source word also exists in the target vocabulary, then choose identity translation instead of the random one. 9 2. Hash Sampling Steps: For each source word token f i , run the hash sampler:",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Training Algorithm",
"sec_num": "5"
},
{
"text": "(a) Generate a proposal distribution by computing the hamming distance between the feature vectors for the source word and each target translation candidate. Sample a new target translation e i for f i from this distribution.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Training Algorithm",
"sec_num": "5"
},
{
"text": "(b) Compute the acceptance probability for the chosen translation using a Metropolis Hastings scheme and accept (or reject) the sample. In practice, computation of the acceptance probability only needs to be done every r iterations (where r can be anywhere from 5 or 100). Iterate through steps (2a) and (2b) for every word in the source text and then repeat this process for multiple iterations (usually 1000). 3. Other Sampling Operators: After every k iterations, 10 perform the following sampling operations:",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Training Algorithm",
"sec_num": "5"
},
{
"text": "(a) Re-ordering: For each source word token f i at position i, randomly choose another position j in the source sentence and swap the translations e i with e j . During the sampling process, we compute the probabilities for the two samples-the original and the swapped versions, and then sample an alignment from this distribution. (b) Deletion: For each source word token, delete the current target translation (i.e., align it with the target NULL token). As with the reordering operation, we sample from a distribution consisting of the original and the deleted versions. 4. Decoding the foreign sentence: Finally, once the training is done (i.e., after all sampling iterations) we choose the final sample as our target translation output for the source text.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Training Algorithm",
"sec_num": "5"
},
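{
"text": "The overall loop, with the pieces above, looks roughly as follows (a schematic only; sample_proposal, mh_correct and apply_reordering_and_deletion are hypothetical wrappers around the steps sketched earlier):\nimport random\n\ndef train(source_sents, Ve, iters=1000, r=10, k=5):\n    # 1. Initialization: identity translation when possible, otherwise random\n    target_vocab = set(Ve)\n    E = [[w if w in target_vocab else random.choice(Ve) for w in sent]\n         for sent in source_sents]\n    for it in range(iters):\n        for sent, e_sent in zip(source_sents, E):\n            for i in range(len(sent)):\n                # 2a. Draw a candidate from the hash-based proposal distribution\n                cand = sample_proposal(sent, e_sent, i)\n                # 2b. Metropolis-Hastings correction, applied every r iterations\n                if it % r != 0 or mh_correct(sent, e_sent, i, cand):\n                    e_sent[i] = cand\n        if it % k == 0:\n            apply_reordering_and_deletion(source_sents, E)  # step 3 operators\n    return E  # 4. the final sample is the translation output",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Training Algorithm",
"sec_num": "5"
},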
{
"text": "We test our method on two different corpora. To evaluate translation quality, we use BLEU score (Papineni et al., 2002) , a standard evaluation measure used in machine translation.",
"cite_spans": [
{
"start": 96,
"end": 119,
"text": "(Papineni et al., 2002)",
"ref_id": "BIBREF13"
}
],
"ref_spans": [],
"eq_spans": [],
"section": "Experiments and Results",
"sec_num": "6"
},
{
"text": "First, we present MT results on non-parallel Spanish/English data from the OPUS corpus (Tiedemann, 2009) which was used by Ravi and Knight (2011b) and Nuhn et al. (2012) . We show that our method achieves the best performance (BLEU scores) on this task while being significantly faster than both the previous approaches. We then apply our method to a much larger non-parallel French/Spanish corpus constructed from the EMEA corpus (Tiedemann, 2009) . Here the vocabulary sizes are much larger and we show how our new Bayesian decipherment method scales well to this task inspite of using complex translation models. We also report the first BLEU results on such a large-scale MT task under truly non-parallel settings (without using any parallel data or seed lexicon).",
"cite_spans": [
{
"start": 87,
"end": 104,
"text": "(Tiedemann, 2009)",
"ref_id": "BIBREF19"
},
{
"start": 123,
"end": 146,
"text": "Ravi and Knight (2011b)",
"ref_id": "BIBREF16"
},
{
"start": 151,
"end": 169,
"text": "Nuhn et al. (2012)",
"ref_id": "BIBREF12"
},
{
"start": 431,
"end": 448,
"text": "(Tiedemann, 2009)",
"ref_id": "BIBREF19"
}
],
"ref_spans": [],
"eq_spans": [],
"section": "Experiments and Results",
"sec_num": "6"
},
{
"text": "For both the MT tasks, we also report BLEU scores for a baseline system using identity translations for common words (words appearing in both source/target vocabularies) and random translations for other words.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Experiments and Results",
"sec_num": "6"
},
{
"text": "OPUS movie subtitle corpus (Tiedemann, 2009) : This is a large open source collection of parallel corpora available for multiple language pairs. We use the same non-parallel Spanish/English corpus used in previous works (Ravi and Knight, 2011b; Nuhn et al., 2012) . The details of the corpus are listed in Table 1 . We use the entire Spanish source text for decipherment training and evaluate the final English output to report BLEU scores. EMEA corpus (Tiedemann, 2009) : This is a parallel corpus made out of PDF documents (articles from the medical domain) from the European Medicines Agency. We reserve the first 1k sentences in French as our source text (also used in decipherment training). To construct a nonparallel corpus, we split the remaining 1.1M lines as follows: first 550k sentences in French, last 550k sentences in Spanish. The latter is used to construct a target language model used for decipherment training. The corpus statistics are shown in Table 1 .",
"cite_spans": [
{
"start": 27,
"end": 44,
"text": "(Tiedemann, 2009)",
"ref_id": "BIBREF19"
},
{
"start": 220,
"end": 244,
"text": "(Ravi and Knight, 2011b;",
"ref_id": "BIBREF16"
},
{
"start": 245,
"end": 263,
"text": "Nuhn et al., 2012)",
"ref_id": "BIBREF12"
},
{
"start": 453,
"end": 470,
"text": "(Tiedemann, 2009)",
"ref_id": "BIBREF19"
}
],
"ref_spans": [
{
"start": 306,
"end": 313,
"text": "Table 1",
"ref_id": "TABREF2"
},
{
"start": 965,
"end": 972,
"text": "Table 1",
"ref_id": "TABREF2"
}
],
"eq_spans": [],
"section": "MT Task and Data",
"sec_num": "6.1"
},
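{
"text": "The non-parallel split is easy to reproduce from the sentence-aligned EMEA files; a sketch under the stated assumptions (fr_lines and es_lines are the line-aligned French and Spanish sides of the corpus):\ndef build_nonparallel_emea(fr_lines, es_lines):\n    # First 1k French sentences: the decipherment source text\n    source_text = fr_lines[:1000]\n    rest_fr, rest_es = fr_lines[1000:], es_lines[1000:]\n    # Disjoint halves of the remaining ~1.1M lines: first 550k French sentences,\n    # last 550k Spanish sentences, so no sentence-aligned pairs survive\n    fr_half = rest_fr[:550000]\n    es_half = rest_es[-550000:]  # used to train the target language model\n    return source_text, fr_half, es_half",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "MT Task and Data",
"sec_num": "6.1"
},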
{
"text": "We compare the MT results (BLEU scores) from different systems on the OPUS corpus in Table 2 . The first row displays baseline performance. The next three rows 1a-1c display performance achieved by two methods from Ravi and Knight (2011b) . Rows 2a, 2b show results from the of Nuhn et al. (2012) . The last two rows display results for the new method using Bayesian hash sampling. Overall, using a 3-gram language model (instead of 2-gram) for decipherment training improves the performance for all methods. We observe that our method produces much better results than the others even with a 2-gram LM. With a 3-gram LM, the new method achieves the best performance; the highest BLEU score reported on this task. It is also interesting to note that the hash sampling method yields much better results than the Bayesian inference method presented in (Ravi and Knight, 2011b) . This is due to the accelerated sampling scheme introduced earlier which helps it converge to better solutions faster. Table 2 (last column) also compares the efficiency of different methods in terms of CPU time required for training. Both our 2-gram and 3-gram based methods are significantly faster than those previously reported for EM based training methods presented in (Ravi and Knight, 2011b; Nuhn Method BLEU Time (hours) Baseline system (identity translations)",
"cite_spans": [
{
"start": 215,
"end": 238,
"text": "Ravi and Knight (2011b)",
"ref_id": "BIBREF16"
},
{
"start": 278,
"end": 296,
"text": "Nuhn et al. (2012)",
"ref_id": "BIBREF12"
},
{
"start": 850,
"end": 874,
"text": "(Ravi and Knight, 2011b)",
"ref_id": "BIBREF16"
},
{
"start": 1251,
"end": 1275,
"text": "(Ravi and Knight, 2011b;",
"ref_id": "BIBREF16"
}
],
"ref_spans": [
{
"start": 85,
"end": 92,
"text": "Table 2",
"ref_id": "TABREF3"
},
{
"start": 995,
"end": 1002,
"text": "Table 2",
"ref_id": "TABREF3"
}
],
"eq_spans": [],
"section": "OPUS:",
"sec_num": null
},
{
"text": "6.9 1a. EM with 2-gram LM (Ravi and Knight, 2011b) 15.3 \u223c850h 1b. EM with whole-segment LM (Ravi and Knight, 2011b) 19.3 1c. Bayesian IBM Model 3 with 2-gram LM (Ravi and Knight, 2011b) 15.1 2a. EM+Context with 2-gram LM (Nuhn et al., 2012) 15.2 50h 2b. EM+Context with 3-gram LM (Nuhn et al., 2012) 20.9 200h 3. Bayesian (standard) Gibbs sampling with 2-gram LM 222h 4a. Bayesian Hash Sampling * with 2-gram LM (this work) 20.3 2.6h 4b. Bayesian Hash Sampling * with 3-gram LM (this work) 21.2 2.7h ( * sampler was run for 1000 iterations) Table 2 : Comparison of MT performance (BLEU scores) and efficiency (running time in CPU hours) on the Spanish/English OPUS corpus using only non-parallel corpora for training. For the Bayesian methods 4a and 4b, the samplers were run for 1000 iterations each on a single machine (1.8GHz Intel processor). For 1a, 2a, 2b, we list the training times as reported by Nuhn et al. (2012) based on their EM implementation for different settings. 3.0 Bayesian Hash Sampling with 2-gram LM vocab=full (V e ), add fertility=no 4.2 vocab=pruned * , add fertility=yes 5.3 Table 3 : MT results on the French/Spanish EMEA corpus using the new hash sampling method. * The last row displays results when we sample target translations from a pruned candidate set (most frequent 1k Spanish words + identity translation candidates) which enables the sampler to run much faster when using more complex models. et al., 2012) . This is very encouraging since Nuhn et al. (2012) reported obtaining a speedup by pruning translation candidates (to \u223c1/8th the original size) prior to EM training. On the other hand, we sample from the full set of translation candidates including additional target phrase (of size 2) candidates which results in a much larger vocabulary consisting of 1600 candidates (\u223c4 times the original size), yet our method runs much faster and yields better results. The table also demonstrates the siginificant speedup achieved by the hash sampler over a standard Gibbs sampler for the same model (\u223c85 times faster when using a 2-gram LM). We also compare the results against MT performance from parallel training-MOSES system (Koehn et al., 2007) trained on 20k sentence pairs. The comparable number for Table 4 : Sample (1-best) Spanish/French translations produced by the new method on the EMEA corpus using word translation models trained with non-parallel corpora. Table 3 shows the results achieved by our method on the larger task involving EMEA corpus. Here, the target vocabulary V e is much higher (67k). In spite of this challenge and the model complexity, we can still perform decipherment training using Bayesian inference. We report the first BLEU score results on such a large-scale task using a 2-gram LM. This is achieved without using any seed lexicon or parallel corpora. The results are encouraging and demonstrates the ability of the method to scale to large-scale settings while performing efficient inference with complex models, which we believe will be especially useful for future MT application in scenarios where parallel data is hard to obtain. Table 4 displays some sample 1-best translations learned using this method.",
"cite_spans": [
{
"start": 26,
"end": 50,
"text": "(Ravi and Knight, 2011b)",
"ref_id": "BIBREF16"
},
{
"start": 91,
"end": 115,
"text": "(Ravi and Knight, 2011b)",
"ref_id": "BIBREF16"
},
{
"start": 161,
"end": 185,
"text": "(Ravi and Knight, 2011b)",
"ref_id": "BIBREF16"
},
{
"start": 221,
"end": 240,
"text": "(Nuhn et al., 2012)",
"ref_id": "BIBREF12"
},
{
"start": 280,
"end": 299,
"text": "(Nuhn et al., 2012)",
"ref_id": "BIBREF12"
},
{
"start": 905,
"end": 923,
"text": "Nuhn et al. (2012)",
"ref_id": "BIBREF12"
},
{
"start": 1479,
"end": 1497,
"text": "Nuhn et al. (2012)",
"ref_id": "BIBREF12"
},
{
"start": 2166,
"end": 2186,
"text": "(Koehn et al., 2007)",
"ref_id": "BIBREF11"
}
],
"ref_spans": [
{
"start": 541,
"end": 548,
"text": "Table 2",
"ref_id": "TABREF3"
},
{
"start": 1102,
"end": 1109,
"text": "Table 3",
"ref_id": null
},
{
"start": 2244,
"end": 2251,
"text": "Table 4",
"ref_id": null
},
{
"start": 2409,
"end": 2416,
"text": "Table 3",
"ref_id": null
},
{
"start": 3113,
"end": 3120,
"text": "Table 4",
"ref_id": null
}
],
"eq_spans": [],
"section": "OPUS:",
"sec_num": null
},
{
"text": "For comparison purposes, we also evaluate MT performance on this task using parallel training (MOSES trained with hundred sentence pairs) and observe a BLEU score of 11.7.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "EMEA Results",
"sec_num": null
},
{
"text": "There exists some work (Dou and Knight, 2012; Klementiev et al., 2012) that uses monolingual corpora to induce phrase tables, etc. These when combined with standard MT systems such as Moses (Koehn et al., 2007) trained on parallel corpora, have been shown to yield some BLEU score improvements. Nuhn et al. (2012) show some sample English/French lexicon entries learnt using EM algorithm with a pruned translation candidate set on a portion of the Gigaword corpus 11 but do not report any actual MT results. In addition, as we showed earlier our method can use Bayesian inference (which has a lot of nice properties compared to EM for unsupervised natural language tasks (Johnson, 2007; Goldwater and Griffiths, 2007) ) and still scale easily to large vocabulary, data sizes while allowing the models to grow in complexity. Most importantly, our method produces better translation results (as demonstrated on the OPUS MT task). And to our knowledge, this is the first time that anyone has reported MT results under truly non-parallel settings on such a large-scale task (EMEA).",
"cite_spans": [
{
"start": 23,
"end": 45,
"text": "(Dou and Knight, 2012;",
"ref_id": "BIBREF3"
},
{
"start": 46,
"end": 70,
"text": "Klementiev et al., 2012)",
"ref_id": "BIBREF8"
},
{
"start": 190,
"end": 210,
"text": "(Koehn et al., 2007)",
"ref_id": "BIBREF11"
},
{
"start": 295,
"end": 313,
"text": "Nuhn et al. (2012)",
"ref_id": "BIBREF12"
},
{
"start": 671,
"end": 686,
"text": "(Johnson, 2007;",
"ref_id": "BIBREF7"
},
{
"start": 687,
"end": 717,
"text": "Goldwater and Griffiths, 2007)",
"ref_id": "BIBREF5"
}
],
"ref_spans": [],
"eq_spans": [],
"section": "Discussion and Future Work",
"sec_num": "7"
},
{
"text": "Our method is also easily extensible to outof-domain translation scenarios similar to (Dou and Knight, 2012) . While their work also uses Bayesian inference with a slice sampling scheme, our new approach uses a novel hash sampling scheme for decipherment that can easily scale to more complex models. The new decipherment framework also allows one to easily incorporate additional information (besides standard word translations) as features (e.g., context features, topic features, etc.) for unsupervised machine translation which can help further improve the performance in addition to accelerating the sampling process. We already demonstrated the utility of this system by going beyond words and incorporating phrase translations in a decipherment model for the first time.",
"cite_spans": [
{
"start": 86,
"end": 108,
"text": "(Dou and Knight, 2012)",
"ref_id": "BIBREF3"
}
],
"ref_spans": [],
"eq_spans": [],
"section": "Discussion and Future Work",
"sec_num": "7"
},
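{
"text": "To make the hash sampling idea concrete, here is a minimal sketch (ours, not the authors' code; all names and dimensions are illustrative) of the binary hashing it builds on: signed random projections (Charikar, 2002) map feature vectors to short bit signatures whose Hamming distance approximates the angle between vectors, and hence the inner products needed when estimating a proposal distribution over translation candidates:\n\nimport numpy as np\n\ndef signatures(X, n_bits=32, seed=0):\n    # One random hyperplane per bit; the sign of each projection gives one bit.\n    rng = np.random.default_rng(seed)\n    hyperplanes = rng.standard_normal((n_bits, X.shape[1]))\n    return X @ hyperplanes.T >= 0  # boolean matrix: one n_bits-wide signature per row\n\ndef approx_cosine(sig_a, sig_b, n_bits=32):\n    # P[bits differ] = angle / pi, so angle is roughly pi * hamming / n_bits.\n    hamming = np.count_nonzero(sig_a != sig_b)\n    return np.cos(np.pi * hamming / n_bits)\n\n# Toy usage: score 1600 candidate feature vectors against one query vector.\nX = np.random.rand(1600, 500)\nq = np.random.rand(1, 500)\nS, s_q = signatures(X), signatures(q)\nscores = [approx_cosine(S[i], s_q[0]) for i in range(len(S))]",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Discussion and Future Work",
"sec_num": "7"
},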
{
"text": "In the future, we can obtain further speedups (especially for large-scale tasks) by parallelizing the sampling scheme seamlessly across multiple machines and CPU cores. The new framework can also be stacked with complementary techniques such as slice sampling, blocked (and type) sampling to further improve inference efficiency.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Discussion and Future Work",
"sec_num": "7"
},
{
"text": "11 http://www.ldc.upenn.edu/Catalog/catalogEntry.jsp? catalogId=LDC2003T05",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Discussion and Future Work",
"sec_num": "7"
},
{
"text": "To summarize, our method is significantly faster than previous methods based on EM or Bayesian with standard Gibbs sampling and obtains better results than any previously published methods for the same task. The new framework also allows performing Bayesian inference for decipherment applications with more complex models than previously shown. We believe this framework will be useful for further extending MT models in the future to improve translation performance and for many other unsupervised decipherment application scenarios.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Conclusion",
"sec_num": "8"
},
{
"text": "Each component in the translation model (word/phrase translations P \u03b8 (fi|ei), fertility P \u03b8 f ert , etc.) is modeled using a CRP formulation.2 i.e., All the concentration parameters are set to low values; \u03b1 f |e = \u03b1 f ert = 0.01.3 We only apply this constraint when training on source text/corpora made of long sentences (>10 words) where the sampler might converge very slowly. For short sentences, a sparse prior on fertility \u03b1 f ert typically discourages a target word from being aligned to too many different source words.4 Phrase size is limited to two words in our experiments.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "",
"sec_num": null
},
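{
"text": "For illustration, here is a minimal sketch (not the authors' implementation; the class and method names are ours) of the CRP predictive probability for one such component, which interpolates cached counts with a fixed base distribution under a sparse prior: P(f|e) = (n(f,e) + alpha * P0(f|e)) / (n(e) + alpha).\n\nfrom collections import defaultdict\n\nclass CRPComponent:\n    def __init__(self, base, alpha=0.01):  # sparse prior, as in alpha_{f|e} = alpha_fert = 0.01\n        self.base, self.alpha = base, alpha\n        self.pair_counts = defaultdict(int)   # n(f, e)\n        self.event_counts = defaultdict(int)  # n(e)\n\n    def prob(self, f, e):\n        num = self.pair_counts[(f, e)] + self.alpha * self.base(f, e)\n        return num / (self.event_counts[e] + self.alpha)\n\n    def observe(self, f, e, delta=1):  # delta=-1 removes a sample before re-sampling it\n        self.pair_counts[(f, e)] += delta\n        self.event_counts[e] += delta\n\n# Toy usage with a uniform base distribution over a 1000-word vocabulary.\ncrp = CRPComponent(base=lambda f, e: 1.0 / 1000)\ncrp.observe('maison', 'house')\np = crp.prob('maison', 'house')  # the cached count dominates the tiny prior mass",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "",
"sec_num": null
},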
{
"text": "A high value for the LM concentration parameter \u03b1 ensures that the LM probabilities do not deviate too far from the original fixed base distribution during sampling.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "",
"sec_num": null
},
{
"text": "We set l = 32 bits in our experiments.7 In practice, we can ignore the norm terms to further speed up sampling since this is only an estimate for the proposal distribution and we follow this with the Metropolis Hastings step.8 For further details, please refer to(Ahmed et al., 2012).",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "",
"sec_num": null
},
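{
"text": "As a sketch of the Metropolis-Hastings correction mentioned above (this is the generic MH accept/reject rule; the function names are ours), a candidate drawn from the approximate, hash-based proposal q is accepted with probability min(1, p(new)q(old) / (p(old)q(new))), so approximation error in the proposal does not bias the stationary distribution:\n\nimport random\n\ndef mh_step(current, draw_proposal, q_prob, p_unnorm):\n    # draw_proposal() samples from the approximate proposal distribution;\n    # q_prob(x) is its probability and p_unnorm(x) the true unnormalized posterior.\n    candidate = draw_proposal()\n    ratio = (p_unnorm(candidate) * q_prob(current)) / (p_unnorm(current) * q_prob(candidate))\n    return candidate if random.random() < min(1.0, ratio) else current",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "",
"sec_num": null
},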
{
"text": "Initializing with identity translation rather than random choice helps in some cases, especially for unknown words that involve named entities, etc.10 We set k = 3 in our experiments.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "",
"sec_num": null
}
],
"back_matter": [],
"bib_entries": {
"BIBREF0": {
"ref_id": "b0",
"title": "Fastex: Hash clustering with exponential families",
"authors": [
{
"first": "Amr",
"middle": [],
"last": "Ahmed",
"suffix": ""
},
{
"first": "Sujith",
"middle": [],
"last": "Ravi",
"suffix": ""
},
{
"first": "Shravan",
"middle": [],
"last": "Narayanamurthy",
"suffix": ""
},
{
"first": "Alex",
"middle": [],
"last": "Smola",
"suffix": ""
}
],
"year": 2012,
"venue": "Proceedings of the 26th Conference on Neural Information Processing Systems (NIPS)",
"volume": "",
"issue": "",
"pages": "",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Amr Ahmed, Sujith Ravi, Shravan Narayanamurthy, and Alex Smola. 2012. Fastex: Hash clustering with exponential families. In Proceedings of the 26th Conference on Neural Information Processing Systems (NIPS).",
"links": null
},
"BIBREF1": {
"ref_id": "b1",
"title": "Similarity estimation techniques from rounding algorithms",
"authors": [
{
"first": "Moses",
"middle": [
"S"
],
"last": "Charikar",
"suffix": ""
}
],
"year": 2002,
"venue": "Proceedings of the thiry-fourth annual ACM Symposium on Theory of Computing",
"volume": "",
"issue": "",
"pages": "380--388",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Moses S. Charikar. 2002. Similarity estimation tech- niques from rounding algorithms. In Proceedings of the thiry-fourth annual ACM Symposium on Theory of Computing, pages 380-388.",
"links": null
},
"BIBREF2": {
"ref_id": "b2",
"title": "Maximum likelihood from incomplete data via the em algorithm",
"authors": [
{
"first": "A",
"middle": [
"P"
],
"last": "Dempster",
"suffix": ""
},
{
"first": "N",
"middle": [
"M"
],
"last": "Laird",
"suffix": ""
},
{
"first": "D",
"middle": [
"B"
],
"last": "Rubin",
"suffix": ""
}
],
"year": 1977,
"venue": "Journal of the Royal Statistical Society, Series B",
"volume": "39",
"issue": "1",
"pages": "1--38",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "A. P. Dempster, N. M. Laird, and D. B. Rubin. 1977. Maximum likelihood from incomplete data via the em algorithm. Journal of the Royal Statistical Soci- ety, Series B, 39(1):1-38.",
"links": null
},
"BIBREF3": {
"ref_id": "b3",
"title": "Large scale decipherment for out-of-domain machine translation",
"authors": [
{
"first": "Qing",
"middle": [],
"last": "Dou",
"suffix": ""
},
{
"first": "Kevin",
"middle": [],
"last": "Knight",
"suffix": ""
}
],
"year": 2012,
"venue": "Proceedings of the 2012 Joint Conference on Empirical Methods in Natural Language Processing and Computational Natural Language Learning",
"volume": "",
"issue": "",
"pages": "266--275",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Qing Dou and Kevin Knight. 2012. Large scale deci- pherment for out-of-domain machine translation. In Proceedings of the 2012 Joint Conference on Empir- ical Methods in Natural Language Processing and Computational Natural Language Learning, pages 266-275.",
"links": null
},
"BIBREF4": {
"ref_id": "b4",
"title": "Finding terminology translations from non-parallel corpora",
"authors": [
{
"first": "Pascale",
"middle": [],
"last": "Fung",
"suffix": ""
},
{
"first": "Kathleen",
"middle": [],
"last": "Mckeown",
"suffix": ""
}
],
"year": 1997,
"venue": "Proceedings of the 5th Annual Workshop on Very Large Corpora",
"volume": "",
"issue": "",
"pages": "192--202",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Pascale Fung and Kathleen McKeown. 1997. Finding terminology translations from non-parallel corpora. In Proceedings of the 5th Annual Workshop on Very Large Corpora, pages 192-202.",
"links": null
},
"BIBREF5": {
"ref_id": "b5",
"title": "A fully bayesian approach to unsupervised part-of-speech tagging",
"authors": [
{
"first": "Sharon",
"middle": [],
"last": "Goldwater",
"suffix": ""
},
{
"first": "Tom",
"middle": [],
"last": "Griffiths",
"suffix": ""
}
],
"year": 2007,
"venue": "Proceedings of the 45th Annual Meeting of the Association of Computational Linguistics",
"volume": "",
"issue": "",
"pages": "744--751",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Sharon Goldwater and Tom Griffiths. 2007. A fully bayesian approach to unsupervised part-of-speech tagging. In Proceedings of the 45th Annual Meet- ing of the Association of Computational Linguistics, pages 744-751.",
"links": null
},
"BIBREF6": {
"ref_id": "b6",
"title": "Learning bilingual lexicons from monolingual corpora",
"authors": [
{
"first": "Aria",
"middle": [],
"last": "Haghighi",
"suffix": ""
},
{
"first": "Percy",
"middle": [],
"last": "Liang",
"suffix": ""
},
{
"first": "Taylor",
"middle": [],
"last": "Berg-Kirkpatrick",
"suffix": ""
},
{
"first": "Dan",
"middle": [],
"last": "Klein",
"suffix": ""
}
],
"year": 2008,
"venue": "Proceedings of ACL: HLT",
"volume": "",
"issue": "",
"pages": "771--779",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Aria Haghighi, Percy Liang, Taylor Berg-Kirkpatrick, and Dan Klein. 2008. Learning bilingual lexicons from monolingual corpora. In Proceedings of ACL: HLT, pages 771-779.",
"links": null
},
"BIBREF7": {
"ref_id": "b7",
"title": "Why doesn't EM find good HMM POS-taggers?",
"authors": [
{
"first": "Mark",
"middle": [],
"last": "Johnson",
"suffix": ""
}
],
"year": 2007,
"venue": "Proceedings of the Joint Conference on Empirical Methods in Natural Language Processing and Computational Natural Language Learning (EMNLP-CoNLL)",
"volume": "",
"issue": "",
"pages": "296--305",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Mark Johnson. 2007. Why doesn't EM find good HMM POS-taggers? In Proceedings of the Joint Conference on Empirical Methods in Natural Lan- guage Processing and Computational Natural Lan- guage Learning (EMNLP-CoNLL), pages 296-305.",
"links": null
},
"BIBREF8": {
"ref_id": "b8",
"title": "Toward statistical machine translation without parallel corpora",
"authors": [
{
"first": "Alex",
"middle": [],
"last": "Klementiev",
"suffix": ""
},
{
"first": "Ann",
"middle": [],
"last": "Irvine",
"suffix": ""
},
{
"first": "Chris",
"middle": [],
"last": "Callison-Burch",
"suffix": ""
},
{
"first": "David",
"middle": [],
"last": "Yarowsky",
"suffix": ""
}
],
"year": 2012,
"venue": "Proceedings of the 13th Conference of the European Chapter of the Association for Computational Linguistics",
"volume": "",
"issue": "",
"pages": "",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Alex Klementiev, Ann Irvine, Chris Callison-Burch, and David Yarowsky. 2012. Toward statistical ma- chine translation without parallel corpora. In Pro- ceedings of the 13th Conference of the European Chapter of the Association for Computational Lin- guistics.",
"links": null
},
"BIBREF9": {
"ref_id": "b9",
"title": "A computational approach to deciphering unknown scripts",
"authors": [
{
"first": "Kevin",
"middle": [],
"last": "Knight",
"suffix": ""
},
{
"first": "Kenji",
"middle": [],
"last": "Yamada",
"suffix": ""
}
],
"year": 1999,
"venue": "Proceedings of the ACL Workshop on Unsupervised Learning in Natural Language Processing",
"volume": "",
"issue": "",
"pages": "37--44",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Kevin Knight and Kenji Yamada. 1999. A computa- tional approach to deciphering unknown scripts. In Proceedings of the ACL Workshop on Unsupervised Learning in Natural Language Processing, pages 37-44.",
"links": null
},
"BIBREF10": {
"ref_id": "b10",
"title": "Estimating word translation probabilities from unrelated monolingual corpora using the em algorithm",
"authors": [
{
"first": "Philipp",
"middle": [],
"last": "Koehn",
"suffix": ""
},
{
"first": "Kevin",
"middle": [],
"last": "Knight",
"suffix": ""
}
],
"year": 2000,
"venue": "Proceedings of the Seventeenth National Conference on Artificial Intelligence and Twelfth Conference on Innovative Applications of Artificial Intelligence",
"volume": "",
"issue": "",
"pages": "711--715",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Philipp Koehn and Kevin Knight. 2000. Estimating word translation probabilities from unrelated mono- lingual corpora using the em algorithm. In Proceed- ings of the Seventeenth National Conference on Ar- tificial Intelligence and Twelfth Conference on Inno- vative Applications of Artificial Intelligence, pages 711-715.",
"links": null
},
"BIBREF11": {
"ref_id": "b11",
"title": "Moses: open source toolkit for statistical machine translation",
"authors": [
{
"first": "Philipp",
"middle": [],
"last": "Koehn",
"suffix": ""
},
{
"first": "Hieu",
"middle": [],
"last": "Hoang",
"suffix": ""
},
{
"first": "Alexandra",
"middle": [],
"last": "Birch",
"suffix": ""
},
{
"first": "Chris",
"middle": [],
"last": "Callison-Burch",
"suffix": ""
},
{
"first": "Marcello",
"middle": [],
"last": "Federico",
"suffix": ""
},
{
"first": "Nicola",
"middle": [],
"last": "Bertoldi",
"suffix": ""
},
{
"first": "Brooke",
"middle": [],
"last": "Cowan",
"suffix": ""
},
{
"first": "Wade",
"middle": [],
"last": "Shen",
"suffix": ""
},
{
"first": "Christine",
"middle": [],
"last": "Moran",
"suffix": ""
},
{
"first": "Richard",
"middle": [],
"last": "Zens",
"suffix": ""
},
{
"first": "Chris",
"middle": [],
"last": "Dyer",
"suffix": ""
},
{
"first": "Ond\u0159ej",
"middle": [],
"last": "Bojar",
"suffix": ""
},
{
"first": "Alexandra",
"middle": [],
"last": "Constantin",
"suffix": ""
},
{
"first": "Evan",
"middle": [],
"last": "Herbst",
"suffix": ""
}
],
"year": 2007,
"venue": "Proceedings of the 45th Annual Meeting of the ACL on Interactive Poster and Demonstration Sessions",
"volume": "",
"issue": "",
"pages": "177--180",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Philipp Koehn, Hieu Hoang, Alexandra Birch, Chris Callison-Burch, Marcello Federico, Nicola Bertoldi, Brooke Cowan, Wade Shen, Christine Moran, Richard Zens, Chris Dyer, Ond\u0159ej Bojar, Alexandra Constantin, and Evan Herbst. 2007. Moses: open source toolkit for statistical machine translation. In Proceedings of the 45th Annual Meeting of the ACL on Interactive Poster and Demonstration Sessions, pages 177-180.",
"links": null
},
"BIBREF12": {
"ref_id": "b12",
"title": "Deciphering foreign language by combining language models and context vectors",
"authors": [
{
"first": "Malte",
"middle": [],
"last": "Nuhn",
"suffix": ""
},
{
"first": "Arne",
"middle": [],
"last": "Mauser",
"suffix": ""
},
{
"first": "Hermann",
"middle": [],
"last": "Ney",
"suffix": ""
}
],
"year": 2012,
"venue": "Proceedings of the 50th Annual Meeting of the Association for Computational Linguistics",
"volume": "",
"issue": "",
"pages": "156--164",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Malte Nuhn, Arne Mauser, and Hermann Ney. 2012. Deciphering foreign language by combining lan- guage models and context vectors. In Proceedings of the 50th Annual Meeting of the Association for Computational Linguistics, pages 156-164.",
"links": null
},
"BIBREF13": {
"ref_id": "b13",
"title": "Bleu: a method for automatic evaluation of machine translation",
"authors": [
{
"first": "Kishore",
"middle": [],
"last": "Papineni",
"suffix": ""
},
{
"first": "Salim",
"middle": [],
"last": "Roukos",
"suffix": ""
},
{
"first": "Todd",
"middle": [],
"last": "Ward",
"suffix": ""
},
{
"first": "Wei-Jing",
"middle": [],
"last": "Zhu",
"suffix": ""
}
],
"year": 2002,
"venue": "Proceedings of the 40th Annual Meeting on Association for Computational Linguistics",
"volume": "",
"issue": "",
"pages": "311--318",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Kishore Papineni, Salim Roukos, Todd Ward, and Wei- Jing Zhu. 2002. Bleu: a method for automatic eval- uation of machine translation. In Proceedings of the 40th Annual Meeting on Association for Computa- tional Linguistics, pages 311-318.",
"links": null
},
"BIBREF14": {
"ref_id": "b14",
"title": "Identifying word translations in non-parallel texts",
"authors": [
{
"first": "Reinhard",
"middle": [],
"last": "Rapp",
"suffix": ""
}
],
"year": 1995,
"venue": "Proceedings of the 33rd Annual Meeting on Association for Computational Linguistics",
"volume": "",
"issue": "",
"pages": "320--322",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Reinhard Rapp. 1995. Identifying word translations in non-parallel texts. In Proceedings of the 33rd An- nual Meeting on Association for Computational Lin- guistics, pages 320-322.",
"links": null
},
"BIBREF15": {
"ref_id": "b15",
"title": "Bayesian inference for zodiac and other homophonic ciphers",
"authors": [
{
"first": "Sujith",
"middle": [],
"last": "Ravi",
"suffix": ""
},
{
"first": "Kevin",
"middle": [],
"last": "Knight",
"suffix": ""
}
],
"year": 2011,
"venue": "Proceedings of the 49th Annual Meeting of the Association for Computational Linguistics: Human Language Technologies",
"volume": "1",
"issue": "",
"pages": "239--247",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Sujith Ravi and Kevin Knight. 2011a. Bayesian in- ference for zodiac and other homophonic ciphers. In Proceedings of the 49th Annual Meeting of the Association for Computational Linguistics: Human Language Technologies -Volume 1, pages 239-247.",
"links": null
},
"BIBREF16": {
"ref_id": "b16",
"title": "Deciphering foreign language",
"authors": [
{
"first": "Sujith",
"middle": [],
"last": "Ravi",
"suffix": ""
},
{
"first": "Kevin",
"middle": [],
"last": "Knight",
"suffix": ""
}
],
"year": 2011,
"venue": "Proceedings of the 49th Annual Meeting of the Association for Computational Linguistics: Human Language Technologies",
"volume": "",
"issue": "",
"pages": "12--21",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Sujith Ravi and Kevin Knight. 2011b. Deciphering foreign language. In Proceedings of the 49th An- nual Meeting of the Association for Computational Linguistics: Human Language Technologies, pages 12-21.",
"links": null
},
"BIBREF17": {
"ref_id": "b17",
"title": "Randomized algorithms and nlp: using locality sensitive hash function for high speed noun clustering",
"authors": [
{
"first": "Deepak",
"middle": [],
"last": "Ravichandran",
"suffix": ""
},
{
"first": "Patrick",
"middle": [],
"last": "Pantel",
"suffix": ""
},
{
"first": "Eduard",
"middle": [],
"last": "Hovy",
"suffix": ""
}
],
"year": 2005,
"venue": "Proceedings of the 43rd Annual Meeting on Association for Computational Linguistics",
"volume": "",
"issue": "",
"pages": "622--629",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Deepak Ravichandran, Patrick Pantel, and Eduard Hovy. 2005. Randomized algorithms and nlp: us- ing locality sensitive hash function for high speed noun clustering. In Proceedings of the 43rd Annual Meeting on Association for Computational Linguis- tics, pages 622-629.",
"links": null
},
"BIBREF18": {
"ref_id": "b18",
"title": "A statistical model for lost language decipherment",
"authors": [
{
"first": "Benjamin",
"middle": [],
"last": "Snyder",
"suffix": ""
},
{
"first": "Regina",
"middle": [],
"last": "Barzilay",
"suffix": ""
},
{
"first": "Kevin",
"middle": [],
"last": "Knight",
"suffix": ""
}
],
"year": 2010,
"venue": "Proceedings of the 48th Annual Meeting of the Association for Computational Linguistics",
"volume": "",
"issue": "",
"pages": "1048--1057",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Benjamin Snyder, Regina Barzilay, and Kevin Knight. 2010. A statistical model for lost language deci- pherment. In Proceedings of the 48th Annual Meet- ing of the Association for Computational Linguis- tics, pages 1048-1057.",
"links": null
},
"BIBREF19": {
"ref_id": "b19",
"title": "News from opus -a collection of multilingual parallel corpora with tools and interfaces",
"authors": [
{
"first": "J\u00f6rg",
"middle": [],
"last": "Tiedemann",
"suffix": ""
}
],
"year": 2009,
"venue": "Recent Advances in Natural Language Processing",
"volume": "V",
"issue": "",
"pages": "237--248",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "J\u00f6rg Tiedemann. 2009. News from opus -a collection of multilingual parallel corpora with tools and inter- faces. In N. Nicolov, K. Bontcheva, G. Angelova, and R. Mitkov, editors, Recent Advances in Natural Language Processing, volume V, pages 237-248.",
"links": null
}
},
"ref_entries": {
"FIGREF0": {
"text": "(e) \u2022 P \u03b8 (f |e)",
"type_str": "figure",
"uris": null,
"num": null
},
"FIGREF1": {
"text": "MethodBLEU Baseline system (identity translations)",
"type_str": "figure",
"uris": null,
"num": null
},
"TABREF2": {
"text": "Statistics of non-parallel corpora used here.",
"type_str": "table",
"html": null,
"num": null,
"content": "<table/>"
},
"TABREF3": {
"text": "",
"type_str": "table",
"html": null,
"num": null,
"content": "<table><tr><td>is 63.6</td></tr></table>"
}
}
}
} |