{
"paper_id": "D09-1007",
"header": {
"generated_with": "S2ORC 1.0.0",
"date_generated": "2023-01-19T16:38:16.899221Z"
},
"title": "Cube Pruning as Heuristic Search",
"authors": [
{
"first": "Mark",
"middle": [],
"last": "Hopkins",
"suffix": "",
"affiliation": {
"laboratory": "",
"institution": "Language Weaver, Inc",
"location": {
"addrLine": "4640 Admiralty Way, Suite 1210 Marina del Rey",
"postCode": "90292",
"region": "CA"
}
},
"email": "mhopkins@languageweaver.com"
},
{
"first": "Greg",
"middle": [],
"last": "Langmead",
"suffix": "",
"affiliation": {
"laboratory": "",
"institution": "Language Weaver, Inc",
"location": {
"addrLine": "4640 Admiralty Way, Suite 1210 Marina del Rey",
"postCode": "90292",
"region": "CA"
}
},
"email": "glangmead@languageweaver.com"
}
],
"year": "",
"venue": null,
"identifiers": {},
"abstract": "Cube pruning is a fast inexact method for generating the items of a beam decoder. In this paper, we show that cube pruning is essentially equivalent to A* search on a specific search space with specific heuristics. We use this insight to develop faster and exact variants of cube pruning.",
"pdf_parse": {
"paper_id": "D09-1007",
"_pdf_hash": "",
"abstract": [
{
"text": "Cube pruning is a fast inexact method for generating the items of a beam decoder. In this paper, we show that cube pruning is essentially equivalent to A* search on a specific search space with specific heuristics. We use this insight to develop faster and exact variants of cube pruning.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Abstract",
"sec_num": null
}
],
"body_text": [
{
"text": "In recent years, an intense research focus on machine translation (MT) has raised the quality of MT systems to the degree that they are now viable for a variety of real-world applications. Because of this, the research community has turned its attention to a major drawback of such systems: they are still quite slow. Recent years have seen a flurry of innovative techniques designed to tackle this problem. These include cube pruning (Chiang, 2007) , cube growing (Huang and Chiang, 2007) , early pruning (Moore and Quirk, 2007) , closing spans (Roark and Hollingshead, 2008; Roark and Hollingshead, 2009) , coarse-to-fine methods (Petrov et al., 2008) , pervasive laziness (Pust and Knight, 2009) , and many more.",
"cite_spans": [
{
"start": 435,
"end": 449,
"text": "(Chiang, 2007)",
"ref_id": "BIBREF0"
},
{
"start": 465,
"end": 489,
"text": "(Huang and Chiang, 2007)",
"ref_id": "BIBREF5"
},
{
"start": 506,
"end": 529,
"text": "(Moore and Quirk, 2007)",
"ref_id": "BIBREF6"
},
{
"start": 546,
"end": 576,
"text": "(Roark and Hollingshead, 2008;",
"ref_id": "BIBREF11"
},
{
"start": 577,
"end": 606,
"text": "Roark and Hollingshead, 2009)",
"ref_id": "BIBREF12"
},
{
"start": 632,
"end": 653,
"text": "(Petrov et al., 2008)",
"ref_id": "BIBREF9"
},
{
"start": 675,
"end": 698,
"text": "(Pust and Knight, 2009)",
"ref_id": "BIBREF10"
}
],
"ref_spans": [],
"eq_spans": [],
"section": "Introduction",
"sec_num": "1"
},
{
"text": "This massive interest in speed is bringing rapid progress to the field, but it comes with a certain amount of baggage. Each technique brings its own terminology (from the cubes of (Chiang, 2007) to the lazy lists of (Pust and Knight, 2009) ) into the mix. Often, it is not entirely clear why they work. Many apply only to specialized MT situations. Without a deeper understanding of these methods, it is difficult for the practitioner to combine them and adapt them to new use cases.",
"cite_spans": [
{
"start": 180,
"end": 194,
"text": "(Chiang, 2007)",
"ref_id": "BIBREF0"
},
{
"start": 216,
"end": 239,
"text": "(Pust and Knight, 2009)",
"ref_id": "BIBREF10"
}
],
"ref_spans": [],
"eq_spans": [],
"section": "Introduction",
"sec_num": "1"
},
{
"text": "In this paper, we attempt to bring some clarity to the situation by taking a closer look at one of these existing methods. Specifically, we cast the popular technique of cube pruning (Chiang, 2007) in the well-understood terms of heuristic search (Pearl, 1984) . We show that cube pruning is essentially equivalent to A* search on a specific search space with specific heuristics. This simple observation affords a deeper insight into how and why cube pruning works. We show how this insight enables us to easily develop faster and exact variants of cube pruning for tree-to-string transducer-based MT (Galley et al., 2004; Galley et al., 2006; DeNero et al., 2009) .",
"cite_spans": [
{
"start": 183,
"end": 197,
"text": "(Chiang, 2007)",
"ref_id": "BIBREF0"
},
{
"start": 247,
"end": 260,
"text": "(Pearl, 1984)",
"ref_id": null
},
{
"start": 602,
"end": 623,
"text": "(Galley et al., 2004;",
"ref_id": "BIBREF3"
},
{
"start": 624,
"end": 644,
"text": "Galley et al., 2006;",
"ref_id": "BIBREF4"
},
{
"start": 645,
"end": 665,
"text": "DeNero et al., 2009)",
"ref_id": "BIBREF2"
}
],
"ref_spans": [],
"eq_spans": [],
"section": "Introduction",
"sec_num": "1"
},
{
"text": "We begin by describing the problem that cube pruning addresses.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Motivating Example",
"sec_num": "2"
},
{
"text": "Consider a synchronous context-free grammar (SCFG) that includes the following rules:",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Motivating Example",
"sec_num": "2"
},
{
"text": "EQUATION",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [
{
"start": 0,
"end": 8,
"text": "EQUATION",
"ref_id": "EQREF",
"raw_str": "A \u2192 A 0 B 1 , A 0 B 1 (1) B \u2192 A 0 B 1 , B 1 A 0 (2) A \u2192 B 0 A 1 , c B 0 b A 1",
"eq_num": "(3)"
}
],
"section": "Motivating Example",
"sec_num": "2"
},
{
"text": "B \u2192 B 0 A 1 , B 0 A 1 (4) Figure 1 shows CKY decoding in progress. CKY is a bottom-up algorithm that works by building objects known as items, over increasingly larger spans of an input sentence (in the context of SCFG decoding, the items represent partial translations of the input sentence). To limit running time, it is common practice to keep only the n \"best\" items per span (this is known as beam decoding). At this point in Figure 1 , every span of size 2 or less has already been filled, and now we want to fill span [2, 5] with the n items of lowest cost. Cube pruning addresses the problem of how to compute the n-best items efficiently. We can be more precise if we introduce some terminology. An SCFG rule has the form X \u2192 \u03c3, \u03d5, \u223c , where X is a nonterminal (called the postcondition), \u03c3, \u03d5 are strings that may contain terminals and nonterminals, and \u223c is a 1-1 correspondence between equivalent nonterminals of \u03c3 and \u03d5. Usually SCFG rules are represented like the example rules (1)-(4). The subscripts indicate corresponding nonterminals (according to \u223c). Define the preconditions of a rule as the ordered sequence of its nonterminals. For clarity of presentation, we will henceforth restrict our focus to binary rules, i.e. rules of the form: Z \u2192 X 0 Y 1 , \u03d5 . Observe that all the rules of our example are binary rules.",
"cite_spans": [
{
"start": 525,
"end": 528,
"text": "[2,",
"ref_id": null
},
{
"start": 529,
"end": 531,
"text": "5]",
"ref_id": null
}
],
"ref_spans": [
{
"start": 26,
"end": 34,
"text": "Figure 1",
"ref_id": "FIGREF0"
},
{
"start": 431,
"end": 439,
"text": "Figure 1",
"ref_id": "FIGREF0"
}
],
"eq_spans": [],
"section": "Motivating Example",
"sec_num": "2"
},
{
"text": "An item is a triple that contains a span and two strings. We refer to these strings as the postcondition and the carry, respectively. The postcondition tells us which rules may be applied to the item. The carry gives us extra information required to correctly score the item (in SCFG decoding, typically it consists of boundary words for an n-gram language model). 1 To flatten the notation, we will generally represent items as a 4-tuple, e.g.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Motivating Example",
"sec_num": "2"
},
{
"text": "[2, 4, X, a \u22c4 b].",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Motivating Example",
"sec_num": "2"
},
{
"text": "In CKY, new items are created by applying rules to existing items:",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Motivating Example",
"sec_num": "2"
},
{
"text": "r : Z \u2192 X 0 Y 1 , \u03d5 [\u03b1, \u03b4, X, \u03ba 1 ] [\u03b4, \u03b2, Y, \u03ba 2 ] [\u03b1, \u03b2, Z, carry(r, \u03ba 1 , \u03ba 2 )]",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Motivating Example",
"sec_num": "2"
},
{
"text": "(5) In other words, we are allowed to apply a rule r to a pair of items \u03b9 1 , \u03b9 2 if the item spans are complementary and preconditions(r) = postcondition(\u03b9 1 ), postcondition(\u03b9 2 ) . The new item has the same postcondition as the applied rule. We form the carry for the new item through an application-dependent function carry that combines the carries of its subitems (e.g. if the carry is n-gram boundary words, then carry computes the new boundary words). As a shorthand, we introduce the notation \u03b9 1 \u22d7 r \u22d6 \u03b9 2 to describe an item created by applying formula (5) to rule r and items \u03b9 1 , \u03b9 2 .",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Motivating Example",
"sec_num": "2"
},
{
"text": "When we create a new item, it is scored using the following formula: 2",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Motivating Example",
"sec_num": "2"
},
{
"text": "cost(\u03b9 1 \u22d7 r \u22d6 \u03b9 2 ) \u225c cost(r) + cost(\u03b9 1 ) + cost(\u03b9 2 ) + interaction(r, \u03ba 1 , \u03ba 2 ) (6)",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Motivating Example",
"sec_num": "2"
},
{
"text": "We assume that each grammar rule r has an associated cost, denoted cost(r). The interaction cost, denoted interaction(r, \u03ba 1 , \u03ba 2 ), uses the carry information to compute cost components that cannot be incorporated offline into the rule costs (again, for our purposes, this is a language model score).",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Motivating Example",
"sec_num": "2"
},
{
"text": "Cube pruning addresses the problem of efficiently computing the n items of lowest cost for a given span.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Motivating Example",
"sec_num": "2"
},
{
"text": "Refer again to the example in Figure 1 . We want to fill span [2, 5] . There are 26 distinct ways to apply formula (5), which result in 10 unique items. One approach to finding the lowest-cost n items: perform all 26 distinct inferences, compute the cost of the 10 unique items created, then choose the lowest n.",
"cite_spans": [
{
"start": 62,
"end": 65,
"text": "[2,",
"ref_id": null
},
{
"start": 66,
"end": 68,
"text": "5]",
"ref_id": null
}
],
"ref_spans": [
{
"start": 30,
"end": 38,
"text": "Figure 1",
"ref_id": "FIGREF0"
}
],
"eq_spans": [],
"section": "Item Generation as Heuristic Search",
"sec_num": "3"
},
{
"text": "The 26 different ways to form the items can be structured as a search tree. See Figure 2 . First we choose the subspans, then the rule preconditions, then the rule, and finally the subitems. Notice that this search space is already quite large, even for such a simple example. In a realistic situation, we are likely to have a search tree with thousands (possibly millions) of nodes, and we may only want to find the best 100 or so goal nodes. To explore this entire search space seems wasteful. Can we do better?",
"cite_spans": [],
"ref_spans": [
{
"start": 80,
"end": 88,
"text": "Figure 2",
"ref_id": null
}
],
"eq_spans": [],
"section": "Item Generation as Heuristic Search",
"sec_num": "3"
},
{
"text": "Why not perform heuristic search directly on this search space to find the lowest-cost n items? In order to do this, we just need to add heuristics to the internal nodes of the space.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Item Generation as Heuristic Search",
"sec_num": "3"
},
{
"text": "Before doing so, it will help to elaborate on some of the details of the search tree. Let rules(X, Y) be the subset of rules with preconditions X, Y , sorted by increasing cost. Similarly, Figure 2 : Item creation, structured as a search space. rule(X, Y, k) denotes the k th lowest-cost rule with preconditions X, Y . item(\u03b1, \u03b2, X, k) denotes the k th lowest-cost item of span [\u03b1, \u03b2] with postcondition X. let items(\u03b1, \u03b2, X) be the subset of items with span [\u03b1, \u03b2] and postcondition X, also sorted by increasing cost. Finally, let rule(X, Y, k) denote the k th rule of rules(X, Y) and let item(\u03b1, \u03b2, X, k) denote the k th item of items(\u03b1, \u03b2, X).",
"cite_spans": [],
"ref_spans": [
{
"start": 189,
"end": 197,
"text": "Figure 2",
"ref_id": null
}
],
"eq_spans": [],
"section": "Item Generation as Heuristic Search",
"sec_num": "3"
},
{
"text": "A path through the search tree consists of the following sequence of decisions:",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Item Generation as Heuristic Search",
"sec_num": "3"
},
{
"text": "1. Set i, j, k to 1.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Item Generation as Heuristic Search",
"sec_num": "3"
},
{
"text": "[\u03b1, \u03b4], [\u03b4, \u03b2].",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Choose the subspans:",
"sec_num": "2."
},
{
"text": "3. Choose the first precondition X of the rule. The lookahead heuristic. We set the heuristics for rule and item nodes by looking ahead at the cost of the greedy solution from that point in the search space. Figure 2 shows two complete search paths for our example, terminated by goal nodes (in black). Notice that the internal nodes of the search space can be classified by the type of decision they govern. To distinguish between these nodes, we will refer to them as subspan nodes, precondition nodes, rule nodes, and item nodes.",
"cite_spans": [],
"ref_spans": [
{
"start": 208,
"end": 216,
"text": "Figure 2",
"ref_id": null
}
],
"eq_spans": [],
"section": "Choose the subspans:",
"sec_num": "2."
},
{
"text": "We can now proceed to attach heuristics to the nodes and run a heuristic search protocol, say A*, on this search space. For subspan and precondition nodes, we attach trivial uninformative heuristics, i.e. h = \u2212\u221e. For goal nodes, the heuristic is the actual cost of the item they represent. For rule and item nodes, we will use a simple type of heuristic, often referred to in the literature as a lookahead heuristic. Since the rule nodes and item nodes are ordered, respectively, by rule and item cost, it is possible to \"look ahead\" at a greedy solution from any of those nodes. See Figure 3 . This greedy solution is reached by choosing to accept every decision presented until we hit a goal node.",
"cite_spans": [],
"ref_spans": [
{
"start": 584,
"end": 592,
"text": "Figure 3",
"ref_id": "FIGREF2"
}
],
"eq_spans": [],
"section": "Choose the subspans:",
"sec_num": "2."
},
{
"text": "If these heuristics were admissible (i.e. lower bounds on the cost of the best reachable goal node), this would enable us to exactly generate the n-best items without exhausting the search space (assuming the heuristics are strong enough for A* to do some pruning). Here, the lookahead heuristics are clearly not admissible, however the hope is that A* will generate n \"good\" items, and that the time savings will be worth sacrificing exactness for.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Choose the subspans:",
"sec_num": "2."
},
{
"text": "In this section, we will compare cube pruning with our A* search protocol, by tracing through their respective behaviors on the simple example of Figure 1.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Cube Pruning as Heuristic Search",
"sec_num": "4"
},
{
"text": "To fill span [\u03b1, \u03b2], cube pruning (CP) begins by constructing a cube for each tuple of the form:",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Phase 1: Initialization",
"sec_num": "4.1"
},
{
"text": "[\u03b1, \u03b4], [\u03b4, \u03b2], X , Y",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Phase 1: Initialization",
"sec_num": "4.1"
},
{
"text": "where X and Y are nonterminals. A cube consists of three axes: rules(X, Y) and items(\u03b1, \u03b4, X) and items(\u03b4, \u03b2, Y). Figure 4 (left) shows the nontrivial cubes for our example scenario.",
"cite_spans": [],
"ref_spans": [
{
"start": 114,
"end": 122,
"text": "Figure 4",
"ref_id": "FIGREF3"
}
],
"eq_spans": [],
"section": "Phase 1: Initialization",
"sec_num": "4.1"
},
{
"text": "Contrast this with A*, which begins by adding the root node of our search space to an empty heap (ordered by heuristic cost). It proceeds to repeatedly pop the lowest-cost node from the heap, then add its children to the heap (we refer to this operation as visiting the node). Note that before A* ever visits a rule node, it will have visited every subspan and precondition node (because they all have cost h = \u2212\u221e). Figure 4 (right) shows the state of A* at this point in the search. We assume that we do not generate dead-end nodes (a simple matter of checking that there exist applicable rules and items for the chosen subspans and preconditions). Observe the correspondence between the cubes and the heap contents at this point in the A* search.",
"cite_spans": [],
"ref_spans": [
{
"start": 416,
"end": 424,
"text": "Figure 4",
"ref_id": "FIGREF3"
}
],
"eq_spans": [],
"section": "Phase 1: Initialization",
"sec_num": "4.1"
},
{
"text": "Cube pruning proceeds by computing the \"best\" item of each cube",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Phase 2: Seeding the Heap",
"sec_num": "4.2"
},
{
"text": "[\u03b1, \u03b4], [\u03b4, \u03b2], X , Y , i.e. item(\u03b1, \u03b4, X, 1) \u22d7 rule(X, Y, 1) \u22d6 item(\u03b4, \u03b2, Y, 1)",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Phase 2: Seeding the Heap",
"sec_num": "4.2"
},
{
"text": "Because of the interaction cost, there is no guarantee that this will really be the best item of the cube, however it is likely to be a good item because the costs of the individual components are low. These items are added to a heap (to avoid confusion, we will henceforth refer to the two heaps as the CP heap and the A* heap), and prioritized by their costs.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Phase 2: Seeding the Heap",
"sec_num": "4.2"
},
{
"text": "Consider again the example. CP seeds its heap with the \"best\" items of the 4 cubes. There is now a direct correspondence between the CP heap and the A* heap. Moreover, the costs associated with the heap elements also correspond. See Figure 5 .",
"cite_spans": [],
"ref_spans": [
{
"start": 233,
"end": 241,
"text": "Figure 5",
"ref_id": "FIGREF4"
}
],
"eq_spans": [],
"section": "Phase 2: Seeding the Heap",
"sec_num": "4.2"
},
{
"text": "Cube pruning now pops the lowest-cost item from the CP heap. This means that CP has decided to keep the item. After doing so, it forms the \"oneoff\" items and pushes those onto the CP heap. See Figure 5 (left). The popped item is:",
"cite_spans": [],
"ref_spans": [
{
"start": 193,
"end": 201,
"text": "Figure 5",
"ref_id": "FIGREF4"
}
],
"eq_spans": [],
"section": "Phase 3: Finding the First Item",
"sec_num": "4.3"
},
{
"text": "item (viii) \u22d7 rule (1) \u22d6 item (xii)",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Phase 3: Finding the First Item",
"sec_num": "4.3"
},
{
"text": "CP then pushes the following one-off successors onto the CP heap:",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Phase 3: Finding the First Item",
"sec_num": "4.3"
},
{
"text": "item (viii) \u22d7 rule (2) \u22d6 item (xii) item (ix) \u22d7 rule (1) \u22d6 item (xii) item (viii) \u22d7 rule (1) \u22d6 item (xiii)",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Phase 3: Finding the First Item",
"sec_num": "4.3"
},
{
"text": "Contrast this with A*, which pops the lowestcost search node from the A* heap. Here we need to assume that our A* protocol differs slightly from standard A*. Specifically, it will practice node-tying, meaning that when it visits a rule node or an item node, then it also (atomically) visits all nodes on the path to its lookahead goal node. See Figure 5 (right). Observe that all of these nodes have the same heuristic cost, thus standard A* is likely to visit these nodes in succession without the need to enforce node-tying, but it would not be guaranteed (because the heuristics are not admissible). A* keeps the goal node it finds and adds the successors to the heap, scored with their lookahead heuristics. Again, note the direct correspondence between what CP and A* keep, and what they add to their respective heaps.",
"cite_spans": [],
"ref_spans": [
{
"start": 345,
"end": 353,
"text": "Figure 5",
"ref_id": "FIGREF4"
}
],
"eq_spans": [],
"section": "Phase 3: Finding the First Item",
"sec_num": "4.3"
},
{
"text": "Cube pruning and A* continue to repeat Phase 3 until k unique items have been kept. While we could continue to trace through the example, by now it should be clear: cube pruning and our A* protocol with node-tying are doing the same thing at each step. In fact, they are exactly the same algorithm. We do not present a formal proof here; this statement should be regarded as confident conjecture.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Phase 4: Finding Subsequent Items",
"sec_num": "4.4"
},
{
"text": "The node-tying turns out to be an unnecessary artifact. In our early experiments, we discovered that node-tying has no impact on speed or quality. Hence, for the remainder of the paper, we view cube pruning in very simple terms: as nothing more than standard A* search on the search space of Section 3. ",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Phase 4: Finding Subsequent Items",
"sec_num": "4.4"
},
{
"text": "(r, \u03b9 1 , \u03b9 2 ) cost(\u03b9 1 \u22d7 r \u22d6 \u03b9 2 ).",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Phase 4: Finding Subsequent Items",
"sec_num": "4.4"
},
{
"text": "Viewed in this light, the idiosyncracies of cube pruning begin to reveal themselves. On the one hand, rule and item nodes are associated with strong but inadmissible heuristics (the short explanation for why cube pruning is an inexact algorithm). On the other hand, subspan and precondition nodes are associated with weak trivial heuristics. This should be regarded neither as a surprise nor a criticism, considering cube pruning's origins in hierarchical phrase-based MT models (Chiang, 2007) , which have only a small number of distinct nonterminals.",
"cite_spans": [
{
"start": 479,
"end": 493,
"text": "(Chiang, 2007)",
"ref_id": "BIBREF0"
}
],
"ref_spans": [],
"eq_spans": [],
"section": "Augmented Cube Pruning",
"sec_num": "5"
},
{
"text": "But the situation is much different in treeto-string transducer-based MT (Galley et al., 2004; Galley et al., 2006; DeNero et al., 2009) . Transducer-based MT relies on SCFGs with large nonterminal sets. Binarizing the grammars (Zhang et al., 2006) further increases the size of these sets, due to the introduction of virtual nonterminals.",
"cite_spans": [
{
"start": 73,
"end": 94,
"text": "(Galley et al., 2004;",
"ref_id": "BIBREF3"
},
{
"start": 95,
"end": 115,
"text": "Galley et al., 2006;",
"ref_id": "BIBREF4"
},
{
"start": 116,
"end": 136,
"text": "DeNero et al., 2009)",
"ref_id": "BIBREF2"
},
{
"start": 228,
"end": 248,
"text": "(Zhang et al., 2006)",
"ref_id": "BIBREF14"
}
],
"ref_spans": [],
"eq_spans": [],
"section": "Augmented Cube Pruning",
"sec_num": "5"
},
{
"text": "A key benefit of the heuristic search viewpoint is that it is well positioned to take advantage of such insights into the structure of a particular decoding problem. In the case of transducer-based MT, the large set of preconditions encourages us to introduce a nontrivial heuristic for the precondition nodes. The inclusion of these heuristics into the CP search will enable A* to eliminate certain preconditions from consideration, giving us a speedup. For this reason we call this strategy augmented cube pruning.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Augmented Cube Pruning",
"sec_num": "5"
},
{
"text": "Recall that the total cost of a goal node is given by Equation (6), which has four terms. We will form the heuristic for a precondition node by creating a separate heuristic for each of the four terms and using the sum as the overall heuristic.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Heuristics on preconditions",
"sec_num": "5.1"
},
{
"text": "To describe these heuristics, we will make intuitive use of the wildcard operator * to extend our existing notation. For instance, items(\u03b1, \u03b2, *) will denote the union of items(\u03b1, \u03b2, X) over all possible X, sorted by cost.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Heuristics on preconditions",
"sec_num": "5.1"
},
{
"text": "We associate the heuristic h(\u03b4, X, Y) with the search node reached by choosing subspans [\u03b1, \u03b4], [\u03b4, \u03b2], precondition X (for span [\u03b1, \u03b4]), and precondition Y (for span [\u03b4, \u03b2] ). The heuristic is the sum of four terms, mirroring Equation 6:",
"cite_spans": [
{
"start": 167,
"end": 173,
"text": "[\u03b4, \u03b2]",
"ref_id": null
}
],
"ref_spans": [],
"eq_spans": [],
"section": "Heuristics on preconditions",
"sec_num": "5.1"
},
{
"text": "h(\u03b4, X, Y) = cost(rule(X, Y, 1)) + cost(item(\u03b1, \u03b4, X, 1)) + cost(item(\u03b4, \u03b2, Y, 1)) + ih(\u03b4, X, Y)",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Heuristics on preconditions",
"sec_num": "5.1"
},
{
"text": "The first three terms are admissible because each is simply the minimum possible cost of some choice remaining to be made. To construct the interaction heuristic ih(\u03b4, X, Y), consider that in a translation model with an integrated n-gram language model, the interaction cost interaction(r, \u03ba 1 , \u03ba 2 ) is computed by adding the language model costs of any new complete n-grams that are created by combining the carries (boundary words) with each other and with the lexical items on the rule's target side, taking into account any reordering that the rule may perform. We construct a backoff-style estimate of these new n-grams by looking at item(\u03b1, \u03b4,",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Heuristics on preconditions",
"sec_num": "5.1"
},
{
"text": "X, 1) = [\u03b1, \u03b4, X, \u03ba 1 ], item(\u03b4, \u03b2, Y, 1) = [\u03b4, \u03b2, Y, \u03ba 2 ]",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Heuristics on preconditions",
"sec_num": "5.1"
},
{
"text": ", and rule(X, Y, 1). We set ih(\u03b4, X, Y) to be a linear combination of the backoff n-grams of the carries \u03ba 1 and \u03ba 2 , as well as any n-grams introduced by the rule. For instance, if",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Heuristics on preconditions",
"sec_num": "5.1"
},
{
"text": "\u03ba 1 = a b \u22c4 c d \u03ba 2 = e f \u22c4 g h rule(X, Y, 1) = Z \u2192 X 0 Y 1 , X 0 g h i Y 1 then ih(\u03b4, X, Y) = \u03b3 1 \u2022 LM(a) + \u03b3 2 \u2022 LM(a b) + \u03b3 1 \u2022 LM(e) + \u03b3 2 \u2022 LM(e f) + \u03b3 1 \u2022 LM(g) + \u03b3 2 \u2022 LM(g h) + \u03b3 3 \u2022 LM(g h i)",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Heuristics on preconditions",
"sec_num": "5.1"
},
{
"text": "The coefficients of the combination are free parameters that we can tune to trade off between more pruning and more admissibility. Setting the coefficients to zero gives perfect admissibility but is also weak.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Heuristics on preconditions",
"sec_num": "5.1"
},
{
"text": "The heuristic for the first precondition node is computed similarly:",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Heuristics on preconditions",
"sec_num": "5.1"
},
{
"text": "h(\u03b4, X, * ) = cost(rule(X, * , 1)) + cost(item(\u03b1, \u03b4, X, 1)) + cost(item(\u03b4, \u03b2, * , 1)) + ih(\u03b4, X, * ) Standard CP",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Heuristics on preconditions",
"sec_num": "5.1"
},
{
"text": "Augmented CP nodes (k) BLEU time nodes (k) BLEU time 80 34.9 2.5 52 34.7 1.9 148 36.1 3.9 92 35.9 2.4 345 37.2 7.9 200 37.3 5.4 520 37.7 13.4 302 37.7 8. Figure 6 : Nodes visited by standard and augmented cube pruning.",
"cite_spans": [],
"ref_spans": [
{
"start": 154,
"end": 162,
"text": "Figure 6",
"ref_id": null
}
],
"eq_spans": [],
"section": "Heuristics on preconditions",
"sec_num": "5.1"
},
{
"text": "We also apply analogous heuristics to the subspan nodes.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Heuristics on preconditions",
"sec_num": "5.1"
},
{
"text": "We evaluated all of the algorithms in this paper on a syntax-based Arabic-English translation system based on (Galley et al., 2006) , with rules extracted from 200 million words of parallel data from NIST 2008 and GALE data collections, and with a 4-gram language model trained on 1 billion words of monolingual English data from the LDC Gigaword corpus. We evaluated the system's performance on the NIST 2008 test corpus, which consists of 1357 Arabic sentences from a mixture of newswire and web domains, with four English reference translations. We report BLEU scores (Papineni et al., 2002) on untokenized, recapitalized output.",
"cite_spans": [
{
"start": 110,
"end": 131,
"text": "(Galley et al., 2006)",
"ref_id": "BIBREF4"
},
{
"start": 571,
"end": 594,
"text": "(Papineni et al., 2002)",
"ref_id": "BIBREF7"
}
],
"ref_spans": [],
"eq_spans": [],
"section": "Experimental setup",
"sec_num": "5.2"
},
{
"text": "The results for augmented cube pruning are compared against cube pruning in Table 1 . The data from that table are also plotted in Figure 6 and Figure 7 . Each line gives the number of nodes visited by the heuristic search, the average time to decode one sentence, and the BLEU of the output. The number of items kept by each span (the beam) is increased in each subsequent line of the table to indicate how the two algorithms differ at various beam sizes. This also gives a more complete picture of the speed/BLEU tradeoff offered by each algorithm. Because the two algorithms make the same sorts of lookahead computations with the same implementation, they can be most directly compared by examining the number of visited nodes. Augmenting cube pruning with admissible heuristics on the precondition nodes leads to a substantial decrease in visited nodes, by 35-44%. The reduction in nodes converges to a consistent 40% as the beam increases. The BLEU with augmented cube pruning drops by an average of 0.1 compared to standard cube pruning. This is due to the additional inadmissibility of the interaction heuristic.",
"cite_spans": [],
"ref_spans": [
{
"start": 76,
"end": 83,
"text": "Table 1",
"ref_id": "TABREF1"
},
{
"start": 131,
"end": 139,
"text": "Figure 6",
"ref_id": null
},
{
"start": 144,
"end": 152,
"text": "Figure 7",
"ref_id": null
}
],
"eq_spans": [],
"section": "Results for Augmented Cube Pruning",
"sec_num": "5.3"
},
{
"text": "To see in more detail how the heuristics affect the search, we give in Table 2 the number of nodes of each type visited by both variants for one beam size. The precondition heuristic enables A* to prune more than half the precondition nodes.",
"cite_spans": [],
"ref_spans": [
{
"start": 71,
"end": 78,
"text": "Table 2",
"ref_id": "TABREF2"
}
],
"eq_spans": [],
"section": "Results for Augmented Cube Pruning",
"sec_num": "5.3"
},
{
"text": "Common wisdom is that the speed of cube pruning more than compensates for its inexactness (recall that this inexactness is due to the fact that it uses A* search with inadmissible heuristics). Especially when we move into transducer-based MT, the search space becomes so large that brute-force item generation is much too slow to be practical. Still, within the heuristic search framework we may ask the question: is it possible to apply strictly admissible heuristics to the cube pruning search space, and in so doing, create a version of cube pruning that is both fast and exact, one that finds the n best items for each span and not just n good items? One might not expect such a technique to outperform cube pruning in practice, but for a given use case, it would give us a relatively fast way of assessing the BLEU drop incurred by the inexactness of cube pruning.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Exact Cube Pruning",
"sec_num": "6"
},
{
"text": "Recall again that the total cost of a goal node is given by Equation 6, which has four terms. It is easy enough to devise strong lower bounds for the first three of these terms by extending the reasoning of Section 5. Table 3 shows these heuristics. The major challenge is to devise an effective lower bound on the fourth term of the cost function, the interaction heuristic, which in our case is the incremental language model cost.",
"cite_spans": [],
"ref_spans": [
{
"start": 218,
"end": 225,
"text": "Table 3",
"ref_id": null
}
],
"eq_spans": [],
"section": "Exact Cube Pruning",
"sec_num": "6"
},
{
"text": "We take advantage of the following observations:",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Exact Cube Pruning",
"sec_num": "6"
},
{
"text": "1. In a given span, many boundary word patterns are repeated. In other words, for a particular span [\u03b1, \u03b2] and carry \u03ba, we often see many items of the form [\u03b1, \u03b2, X, \u03ba], where the only difference is the postcondition X.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Exact Cube Pruning",
"sec_num": "6"
},
{
"text": "2. Most rules do not introduce lexical items. In other words, most of the grammar rules have the form Z",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Exact Cube Pruning",
"sec_num": "6"
},
{
"text": "\u2192 X 0 Y 1 , X 0 Y 1 (concatena- tion rules) or Z \u2192 X 0 Y 1 , Y 1 X 0 (inver- sion rules).",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Exact Cube Pruning",
"sec_num": "6"
},
{
"text": "The idea is simple. We split the search into three searches: one for concatenation rules, one for inversion rules, and one for lexical rules. Each search finds the n-best items that can be created using its respective set of rules. We then take these 3n items and keep the best n. Doing this split enables us to precompute a strong and admissible heuristic on the interaction cost. Namely, for a given span [\u03b1, \u03b2], we precompute ih adm (\u03b4, X, Y), which is the best LM cost of combining carries from items(\u03b1, \u03b4, X) and items(\u03b4, \u03b2, Y). Notice that this statistic is only straightforward to compute once we can assume that the rules are concatenation rules or inversion rules. For the lexical rules, we set ih adm (\u03b4, X, Y) = 0, an admissible but weak heuristic that we can fortunately get away with because of the small number of lexical rules.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Exact Cube Pruning",
"sec_num": "6"
},
{
"text": "Computing the ih adm (\u03b4, X, Y) heuristic is not cheap. To be fair, we first compare exact CP to standard CP in terms of overall running time, including the computational cost of this overhead. We plot this comparison in Figure 8 . Surprisingly, the time/quality tradeoff of exact CP is extremely similar to standard CP, suggesting that exact cube pruning is actually a practical alternative to standard CP, and not just of theoretical value. We found that the BLEU loss of standard cube pruning at moderate beam sizes was between 0.4 and 0.6. Another surprise comes when we contrast the number of visited search nodes of exact CP and standard CP. See Figure 9 . While we initially expected that exact CP must visit fewer nodes to make up for the computational overhead of its expensive heuristics, this did not turn out to be the case, suggesting that the computational cost of standard CP's lookahead heuristics is just as expensive as the precomputation of ih adm (\u03b4, X, Y).",
"cite_spans": [],
"ref_spans": [
{
"start": 220,
"end": 228,
"text": "Figure 8",
"ref_id": "FIGREF5"
},
{
"start": 651,
"end": 659,
"text": "Figure 9",
"ref_id": "FIGREF6"
}
],
"eq_spans": [],
"section": "Results for Exact Cube Pruning",
"sec_num": "6.1"
},
{
"text": "subspan precondition1 precondition2 rule item1 item2 h(\u03b4) h(\u03b4, X) h(\u03b4, X, Y) h(\u03b4, X, Y, i) h(\u03b4, X, Y, i, j) h(\u03b4, X, Y, i, j, k) r rule( * , * , 1) rule(X, * , 1) rule(X, Y, 1) rule(X, Y, i) rule(X, Y, i) rule(X, Y, i) \u03b9 1 item(\u03b1, \u03b4, * , 1) item(\u03b1, \u03b4, X, 1) item(\u03b1, \u03b4, X, 1) item(\u03b1, \u03b4, X, 1) item(\u03b1, \u03b4, X, j) item(\u03b1, \u03b4, X, j) \u03b9 2 item(\u03b4, \u03b2, * , 1) item(\u03b4, \u03b2, * , 1) item(\u03b4, \u03b2, Y, 1) item(\u03b4, \u03b2, Y, 1) item(\u03b4, \u03b2, Y, 1) item(\u03b4, \u03b2, Y, k)",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "heuristic components",
"sec_num": null
},
{
"text": "ih ih adm (\u03b4, * , * ) ih adm (\u03b4, X, * ) ih adm (\u03b4, X, Y) ih adm (\u03b4, X, Y) ih adm (\u03b4, X, Y) ih adm (\u03b4, X, Y) Table 3 : Admissible heuristics for exact CP. We attach heuristic h(\u03b4, X, Y, i, j, k) to the search node reached by choosing subspans [\u03b1, \u03b4] , [\u03b4, \u03b2] , preconditions X and Y, the i th rule of rules(X, Y), the j th item of item(\u03b1, \u03b4, X), and the k th item of item(\u03b4, \u03b2, Y). To form the heuristic for a particular type of search node (column), compute the following: cost(r) + cost(\u03b9 1 ) + cost(\u03b9 2 ) + ih ",
"cite_spans": [
{
"start": 242,
"end": 248,
"text": "[\u03b1, \u03b4]",
"ref_id": null
},
{
"start": 251,
"end": 257,
"text": "[\u03b4, \u03b2]",
"ref_id": null
}
],
"ref_spans": [
{
"start": 108,
"end": 115,
"text": "Table 3",
"ref_id": null
}
],
"eq_spans": [],
"section": "heuristic components",
"sec_num": null
},
{
"text": "This paper's core idea is the utility of framing CKY item generation as a heuristic search problem. Once we recognize cube pruning as nothing more than A* on a particular search space with particular heuristics, this deeper understanding makes it easy to create faster and exact variants for other use cases (in this paper, we focus on tree-to-string transducer-based MT). Depending on one's own particular use case, a variety of possibilities may present themselves:",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Implications",
"sec_num": "7"
},
{
"text": "1. What if we try different heuristics? In this paper, we do some preliminary inquiry into this question, but it should be clear that our minor changes are just the tip of the iceberg. One can easily imagine clever and creative heuristics that outperform the simple ones we have proposed here.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Implications",
"sec_num": "7"
},
{
"text": "2. What if we try a different search space? Why are we using this particular search space? Perhaps a different one, one that makes de-cisions in a different order, would be more effective.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Implications",
"sec_num": "7"
},
{
"text": "A* has nice guarantees (Dechter and Pearl, 1985) , but it is space-consumptive and it is not anytime. For a use case where we would like a finer-grained speed/quality tradeoff, it might be useful to consider an anytime search algorithm, like depth-first branch-and-bound (Zhang and Korf, 1995) .",
"cite_spans": [
{
"start": 23,
"end": 48,
"text": "(Dechter and Pearl, 1985)",
"ref_id": "BIBREF1"
},
{
"start": 271,
"end": 293,
"text": "(Zhang and Korf, 1995)",
"ref_id": "BIBREF13"
}
],
"ref_spans": [],
"eq_spans": [],
"section": "What if we try a different search algorithm?",
"sec_num": "3."
},
{
"text": "By working towards a deeper and unifying understanding of the smorgasbord of current MT speedup techniques, our hope is to facilitate the task of implementing such methods, combining them effectively, and adapting them to new use cases.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "What if we try a different search algorithm?",
"sec_num": "3."
},
{
"text": "Note that the carry is a generic concept that can store any kind of non-local scoring information.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "",
"sec_num": null
},
{
"text": "Without loss of generality, we assume an additive cost function.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "",
"sec_num": null
}
],
"back_matter": [
{
"text": "We would like to thank Abdessamad Echihabi, Kevin Knight, Daniel Marcu, Dragos Munteanu, Ion Muslea, Radu Soricut, Wei Wang, and the anonymous reviewers for helpful comments and advice. Thanks also to David Chiang for the use of his LaTeX macros. This work was supported in part by CCS grant 2008-1245117-000.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Acknowledgments",
"sec_num": null
}
],
"bib_entries": {
"BIBREF0": {
"ref_id": "b0",
"title": "Hierarchical phrase-based translation",
"authors": [
{
"first": "David",
"middle": [],
"last": "Chiang",
"suffix": ""
}
],
"year": 2007,
"venue": "Computational Linguistics",
"volume": "33",
"issue": "2",
"pages": "201--228",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "David Chiang. 2007. Hierarchical phrase-based trans- lation. Computational Linguistics, 33(2):201-228.",
"links": null
},
"BIBREF1": {
"ref_id": "b1",
"title": "Generalized bestfirst search strategies and the optimality of a*",
"authors": [
{
"first": "Rina",
"middle": [],
"last": "Dechter",
"suffix": ""
},
{
"first": "Judea",
"middle": [],
"last": "Pearl",
"suffix": ""
}
],
"year": 1985,
"venue": "Journal of the ACM",
"volume": "32",
"issue": "3",
"pages": "505--536",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Rina Dechter and Judea Pearl. 1985. Generalized best- first search strategies and the optimality of a*. Jour- nal of the ACM, 32(3):505-536.",
"links": null
},
"BIBREF2": {
"ref_id": "b2",
"title": "Efficient parsing for transducer grammars",
"authors": [
{
"first": "John",
"middle": [],
"last": "Denero",
"suffix": ""
},
{
"first": "Mohit",
"middle": [],
"last": "Bansal",
"suffix": ""
},
{
"first": "Adam",
"middle": [],
"last": "Pauls",
"suffix": ""
},
{
"first": "Dan",
"middle": [],
"last": "Klein",
"suffix": ""
}
],
"year": 2009,
"venue": "Proceedings of the Human Language Technology Conference of the NAACL, Main Conference",
"volume": "",
"issue": "",
"pages": "",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "John DeNero, Mohit Bansal, Adam Pauls, and Dan Klein. 2009. Efficient parsing for transducer gram- mars. In Proceedings of the Human Language Tech- nology Conference of the NAACL, Main Conference.",
"links": null
},
"BIBREF3": {
"ref_id": "b3",
"title": "What's in a translation rule",
"authors": [
{
"first": "Michel",
"middle": [],
"last": "Galley",
"suffix": ""
},
{
"first": "Mark",
"middle": [],
"last": "Hopkins",
"suffix": ""
},
{
"first": "Kevin",
"middle": [],
"last": "Knight",
"suffix": ""
},
{
"first": "Daniel",
"middle": [],
"last": "Marcu",
"suffix": ""
}
],
"year": 2004,
"venue": "Proceedings of HLT/NAACL",
"volume": "",
"issue": "",
"pages": "",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Michel Galley, Mark Hopkins, Kevin Knight, and Daniel Marcu. 2004. What's in a translation rule? In Proceedings of HLT/NAACL.",
"links": null
},
"BIBREF4": {
"ref_id": "b4",
"title": "Scalable inference and training of context-rich syntactic models",
"authors": [
{
"first": "Michel",
"middle": [],
"last": "Galley",
"suffix": ""
},
{
"first": "Jonathan",
"middle": [],
"last": "Graehl",
"suffix": ""
},
{
"first": "Kevin",
"middle": [],
"last": "Knight",
"suffix": ""
},
{
"first": "Daniel",
"middle": [],
"last": "Marcu",
"suffix": ""
},
{
"first": "Steve",
"middle": [],
"last": "Deneefe",
"suffix": ""
},
{
"first": "Wei",
"middle": [],
"last": "Wang",
"suffix": ""
},
{
"first": "Ignacio",
"middle": [],
"last": "Thayer",
"suffix": ""
}
],
"year": 2006,
"venue": "Proceedings of ACL-COLING",
"volume": "",
"issue": "",
"pages": "",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Michel Galley, Jonathan Graehl, Kevin Knight, Daniel Marcu, Steve DeNeefe, Wei Wang, and Ignacio Thayer. 2006. Scalable inference and training of context-rich syntactic models. In Proceedings of ACL-COLING.",
"links": null
},
"BIBREF5": {
"ref_id": "b5",
"title": "Forest rescoring: Faster decoding with integrated language models",
"authors": [
{
"first": "Liang",
"middle": [],
"last": "Huang",
"suffix": ""
},
{
"first": "David",
"middle": [],
"last": "Chiang",
"suffix": ""
}
],
"year": 2007,
"venue": "Proceedings of ACL",
"volume": "",
"issue": "",
"pages": "",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Liang Huang and David Chiang. 2007. Forest rescor- ing: Faster decoding with integrated language mod- els. In Proceedings of ACL.",
"links": null
},
"BIBREF6": {
"ref_id": "b6",
"title": "Faster beam-search decoding for phrasal statistical machine translation",
"authors": [
{
"first": "C",
"middle": [],
"last": "Robert",
"suffix": ""
},
{
"first": "Chris",
"middle": [],
"last": "Moore",
"suffix": ""
},
{
"first": "",
"middle": [],
"last": "Quirk",
"suffix": ""
}
],
"year": 2007,
"venue": "Proceedings of MT Summit XI",
"volume": "",
"issue": "",
"pages": "",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Robert C. Moore and Chris Quirk. 2007. Faster beam-search decoding for phrasal statistical ma- chine translation. In Proceedings of MT Summit XI.",
"links": null
},
"BIBREF7": {
"ref_id": "b7",
"title": "Bleu: a method for automatic evaluation of machine translation",
"authors": [
{
"first": "Kishore",
"middle": [],
"last": "Papineni",
"suffix": ""
},
{
"first": "Salim",
"middle": [],
"last": "Roukos",
"suffix": ""
},
{
"first": "Todd",
"middle": [],
"last": "Ward",
"suffix": ""
},
{
"first": "Wei-Jing",
"middle": [],
"last": "Zhu",
"suffix": ""
}
],
"year": 2002,
"venue": "Proceedings of 40th Annual Meeting of the Association for Computational Linguistics",
"volume": "",
"issue": "",
"pages": "311--318",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Kishore Papineni, Salim Roukos, Todd Ward, and Wei- Jing Zhu. 2002. Bleu: a method for automatic evaluation of machine translation. In Proceedings of 40th Annual Meeting of the Association for Com- putational Linguistics, pages 311-318.",
"links": null
},
"BIBREF9": {
"ref_id": "b9",
"title": "Coarse-to-fine syntactic machine translation using language projections",
"authors": [
{
"first": "Slav",
"middle": [],
"last": "Petrov",
"suffix": ""
},
{
"first": "Aria",
"middle": [],
"last": "Haghighi",
"suffix": ""
},
{
"first": "Dan",
"middle": [],
"last": "Klein",
"suffix": ""
}
],
"year": 2008,
"venue": "Proceedings of EMNLP",
"volume": "",
"issue": "",
"pages": "",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Slav Petrov, Aria Haghighi, and Dan Klein. 2008. Coarse-to-fine syntactic machine translation using language projections. In Proceedings of EMNLP.",
"links": null
},
"BIBREF10": {
"ref_id": "b10",
"title": "Faster mt decoding through pervasive laziness",
"authors": [
{
"first": "Michael",
"middle": [],
"last": "Pust",
"suffix": ""
},
{
"first": "Kevin",
"middle": [],
"last": "Knight",
"suffix": ""
}
],
"year": 2009,
"venue": "Proceedings of NAACL",
"volume": "",
"issue": "",
"pages": "",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Michael Pust and Kevin Knight. 2009. Faster mt de- coding through pervasive laziness. In Proceedings of NAACL.",
"links": null
},
"BIBREF11": {
"ref_id": "b11",
"title": "Classifying chart cells for quadratic complexity contextfree inference",
"authors": [
{
"first": "Brian",
"middle": [],
"last": "Roark",
"suffix": ""
},
{
"first": "Kristy",
"middle": [],
"last": "Hollingshead",
"suffix": ""
}
],
"year": 2008,
"venue": "Proceedings of the 22nd International Conference on Computational Linguistics",
"volume": "",
"issue": "",
"pages": "745--752",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Brian Roark and Kristy Hollingshead. 2008. Classi- fying chart cells for quadratic complexity context- free inference. In Proceedings of the 22nd Inter- national Conference on Computational Linguistics (Coling 2008), pages 745-752.",
"links": null
},
"BIBREF12": {
"ref_id": "b12",
"title": "Linear complexity context-free parsing pipelines via chart constraints",
"authors": [
{
"first": "Brian",
"middle": [],
"last": "Roark",
"suffix": ""
},
{
"first": "Kristy",
"middle": [],
"last": "Hollingshead",
"suffix": ""
}
],
"year": 2009,
"venue": "Proceedings of Human Language Technologies: The 2009 Annual Conference of the North American Chapter of the Association for Computational Linguistics",
"volume": "",
"issue": "",
"pages": "647--655",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Brian Roark and Kristy Hollingshead. 2009. Lin- ear complexity context-free parsing pipelines via chart constraints. In Proceedings of Human Lan- guage Technologies: The 2009 Annual Conference of the North American Chapter of the Associa- tion for Computational Linguistics, pages 647-655, Boulder, Colorado, June. Association for Computa- tional Linguistics.",
"links": null
},
"BIBREF13": {
"ref_id": "b13",
"title": "Performance of linear-space search algorithms",
"authors": [
{
"first": "Weixiong",
"middle": [],
"last": "Zhang",
"suffix": ""
},
{
"first": "Richard",
"middle": [
"E"
],
"last": "Korf",
"suffix": ""
}
],
"year": 1995,
"venue": "Artificial Intelligence",
"volume": "79",
"issue": "2",
"pages": "241--292",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Weixiong Zhang and Richard E. Korf. 1995. Perfor- mance of linear-space search algorithms. Artificial Intelligence, 79(2):241-292.",
"links": null
},
"BIBREF14": {
"ref_id": "b14",
"title": "Synchronous binarization for machine translation",
"authors": [
{
"first": "Hao",
"middle": [],
"last": "Zhang",
"suffix": ""
},
{
"first": "Liang",
"middle": [],
"last": "Huang",
"suffix": ""
},
{
"first": "Daniel",
"middle": [],
"last": "Gildea",
"suffix": ""
},
{
"first": "Kevin",
"middle": [],
"last": "Knight",
"suffix": ""
}
],
"year": 2006,
"venue": "Proceedings of the Human Language Technology Conference of the NAACL, Main Conference",
"volume": "",
"issue": "",
"pages": "256--263",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Hao Zhang, Liang Huang, Daniel Gildea, and Kevin Knight. 2006. Synchronous binarization for ma- chine translation. In Proceedings of the Human Lan- guage Technology Conference of the NAACL, Main Conference, pages 256-263.",
"links": null
}
},
"ref_entries": {
"FIGREF0": {
"uris": null,
"text": "CKY decoding in progress. We want to fill span[2,5] with the lowest cost items.",
"num": null,
"type_str": "figure"
},
"FIGREF1": {
"uris": null,
"text": "4. Choose the second precondition Y of the rule. 5. While rule not yet accepted and i < |rules(X, Y)|: (a) Choose to accept/reject rule(X, Y, i). If reject, then increment i. 6. While item not yet accepted for subspan [\u03b1, \u03b4] and j < |items(\u03b1, \u03b4, X)|: (a) Choose to accept/reject item(\u03b1, \u03b4, X, j). If reject, then increment j. 7. While item not yet accepted for subspan [\u03b4, \u03b2] and k < |items(\u03b4, \u03b2, Y)|: (a) Choose to accept/reject item(\u03b4, \u03b2, Y, k). If reject, then increment k.",
"num": null,
"type_str": "figure"
},
"FIGREF2": {
"uris": null,
"text": "Figure 3: The lookahead heuristic. We set the heuristics for rule and item nodes by looking ahead at the cost of the greedy solution from that point in the search space.",
"num": null,
"type_str": "figure"
},
"FIGREF3": {
"uris": null,
"text": "(left) Cube formation for our example. (right) The A* protocol, after all subspan and precondition nodes have been visited. Notice the correspondence between the cubes and the A* heap contents.",
"num": null,
"type_str": "figure"
},
"FIGREF4": {
"uris": null,
"text": "(left) One step of cube pruning. (right) One step of the A* protocol. In this figure, cost",
"num": null,
"type_str": "figure"
},
"FIGREF5": {
"uris": null,
"text": "Time spent by standard and exact cube pruning, average seconds per sentence.",
"num": null,
"type_str": "figure"
},
"FIGREF6": {
"uris": null,
"text": "Nodes visited by standard and exact cube pruning.",
"num": null,
"type_str": "figure"
},
"TABREF1": {
"text": "",
"html": null,
"num": null,
"content": "<table><tr><td>. The data</td></tr></table>",
"type_str": "table"
},
"TABREF2": {
"text": "Breakdown of visited search nodes by type (for a fixed beam size).",
"html": null,
"num": null,
"content": "<table/>",
"type_str": "table"
}
}
}
} |